DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH] ethdev: check consistency of per port offloads
@ 2018-02-01 13:53 Wei Dai
  2018-03-28  8:57 ` [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads Wei Dai
  0 siblings, 1 reply; 60+ messages in thread
From: Wei Dai @ 2018-02-01 13:53 UTC (permalink / raw)
  To: thomas, shahafs; +Cc: dev, Wei Dai

A per port offloading feature should be enabled or
disabled at same time in both rte_eth_dev_configure( )
and rte_eth_rx_queue_setup( )/rte_eth_tx_queue_setup( ).
This patch checks if a per port offloading flag has
the same configuration in rte_eth_dev_configure( ) and
rte_eth_rx_queue_setup( )/rte_eth_tx_queue_setup( ).
This patch can make such checking in a common way in
rte_ethdev layer to avoid same checking in underlying PMD.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 lib/librte_ether/rte_ethdev.c | 70 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 78bed1a..7945890 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1404,6 +1404,44 @@ rte_eth_dev_is_removed(uint16_t port_id)
 	return ret;
 }
 
+/**
+* Check if the Rx/Tx queue offloading settings are valid
+* @param queue_offloads
+*   offloads input to rte_eth_rx_queue_setup( ) or rte_eth_tx_queue_setup( )
+* @param port_offloads
+*   Rx or Tx offloads input to rte_eth_dev_configure( )
+* @param queue_offload_capa
+*   rx_queue_offload_capa or tx_queue_offload_capa in struct rte_eth_dev_info
+*   got from rte_eth_dev_info_get( )
+* @param all_offload_capa
+*   rx_offload_capa or tx_offload_capa in struct rte_eth_dev_info
+*   got from rte_eth_dev_info_get( )
+*
+* @return
+*   Nonzero when per-queue offloading setting is valid
+*/
+static int
+rte_eth_check_queue_offloads(uint64_t queue_offloads,
+			     uint64_t port_offloads,
+			     uint64_t queue_offload_capa,
+			     uint64_t all_offload_capa)
+{
+	uint64_t pure_port_capa = all_offload_capa ^ queue_offload_capa;
+
+	return !((port_offloads ^ queue_offloads) & pure_port_capa);
+}
+
+static int
+rte_eth_check_rx_queue_offloads(uint64_t rx_queue_offloads,
+				const struct rte_eth_rxmode *rxmode,
+				const struct rte_eth_dev_info *dev_info)
+{
+	return rte_eth_check_queue_offloads(rx_queue_offloads,
+					    rxmode->offloads,
+					    dev_info->rx_queue_offload_capa,
+					    dev_info->rx_offload_capa);
+}
+
 int
 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		       uint16_t nb_rx_desc, unsigned int socket_id,
@@ -1446,6 +1484,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 				(int) sizeof(struct rte_pktmbuf_pool_private));
 		return -ENOSPC;
 	}
+
 	mbp_buf_size = rte_pktmbuf_data_room_size(mp);
 
 	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
@@ -1495,6 +1534,16 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	if (!rte_eth_check_rx_queue_offloads(local_conf.offloads,
+		&dev->data->dev_conf.rxmode, &dev_info)) {
+		RTE_PMD_DEBUG_TRACE("%p : Rx queue offloads 0x%" PRIx64
+			" don't match port offloads or "
+			"supported offloads 0x%" PRIx64 "\n",
+			(void *)dev, local_conf.offloads,
+			dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1555,6 +1604,17 @@ rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
 	*txq_flags = flags;
 }
 
+static int
+rte_eth_check_tx_queue_offloads(uint64_t tx_queue_offloads,
+				const struct rte_eth_txmode *txmode,
+				const struct rte_eth_dev_info *dev_info)
+{
+	return rte_eth_check_queue_offloads(tx_queue_offloads,
+					    txmode->offloads,
+					    dev_info->tx_queue_offload_capa,
+					    dev_info->tx_offload_capa);
+}
+
 int
 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 		       uint16_t nb_tx_desc, unsigned int socket_id,
@@ -1622,6 +1682,16 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	if (!rte_eth_check_tx_queue_offloads(local_conf.offloads,
+		&dev->data->dev_conf.txmode, &dev_info)) {
+		RTE_PMD_DEBUG_TRACE("%p : Tx queue offloads 0x%" PRIx64
+			" don't match port offloads or "
+			"supported offloads 0x%" PRIx64 "\n",
+			(void *)dev, local_conf.offloads,
+			dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads
  2018-02-01 13:53 [dpdk-dev] [PATCH] ethdev: check consistency of per port offloads Wei Dai
@ 2018-03-28  8:57 ` Wei Dai
  2018-04-13 17:31   ` Ferruh Yigit
                     ` (3 more replies)
  0 siblings, 4 replies; 60+ messages in thread
From: Wei Dai @ 2018-03-28  8:57 UTC (permalink / raw)
  To: thomas; +Cc: dev, Wei Dai

This patch checks if a requested offloading
is supported in the device capability.
A per port offloading feature should be enabled or
disabled at same time in both rte_eth_dev_configure( )
and rte_eth_rx_queue_setup( )/rte_eth_tx_queue_setup( ).
This patch checks if a per port offloading flag has
same configuration in rte_eth_dev_configure( ) and
rte_eth_rx_queue_setup( )/rte_eth_tx_queue_setup( ).
This patch can make such checking in a common way in
rte_ethdev layer to avoid same checking in underlying PMD.

Signed-off-by: Wei Dai <wei.dai@intel.com>

---
v2: add offloads checking in rte_eth_dev_configure( ).
    check if a requested offloading is supported
---
 lib/librte_ether/rte_ethdev.c | 100 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 100 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 0590f0c..a04a705 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1152,6 +1152,27 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/*
 	 * Setup new number of RX/TX queues and reconfigure device.
 	 */
@@ -1404,6 +1425,50 @@ rte_eth_dev_is_removed(uint16_t port_id)
 	return ret;
 }
 
+/**
+* Check if the Rx/Tx queue offloading settings are valid
+* @param queue_offloads
+*   offloads input to rte_eth_rx_queue_setup( ) or rte_eth_tx_queue_setup( )
+* @param port_offloads
+*   Rx or Tx offloads input to rte_eth_dev_configure( )
+* @param queue_offload_capa
+*   rx_queue_offload_capa or tx_queue_offload_capa in struct rte_eth_dev_info
+*   got from rte_eth_dev_info_get( )
+* @param all_offload_capa
+*   rx_offload_capa or tx_offload_capa in struct rte_eth_dev_info
+*   got from rte_eth_dev_info_get( )
+*
+* @return
+*   Nonzero when per-queue offloading setting is valid
+*/
+static int
+rte_eth_check_queue_offloads(uint64_t queue_offloads,
+			     uint64_t port_offloads,
+			     uint64_t queue_offload_capa,
+			     uint64_t all_offload_capa)
+{
+	uint64_t pure_port_capa = all_offload_capa ^ queue_offload_capa;
+
+	if ((queue_offloads & all_offload_capa) != queue_offloads)
+		return 0;
+
+	if ((port_offloads ^ queue_offloads) & pure_port_capa)
+		return 0;
+
+	return 1;
+}
+
+static int
+rte_eth_check_rx_queue_offloads(uint64_t rx_queue_offloads,
+				const struct rte_eth_rxmode *rxmode,
+				const struct rte_eth_dev_info *dev_info)
+{
+	return rte_eth_check_queue_offloads(rx_queue_offloads,
+					    rxmode->offloads,
+					    dev_info->rx_queue_offload_capa,
+					    dev_info->rx_offload_capa);
+}
+
 int
 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		       uint16_t nb_rx_desc, unsigned int socket_id,
@@ -1495,6 +1560,18 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	if (!rte_eth_check_rx_queue_offloads(local_conf.offloads,
+		&dev->data->dev_conf.rxmode, &dev_info)) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port %d : Rx queue offloads 0x%"
+			PRIx64 " don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64 "\n",
+			port_id,
+			local_conf.offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1555,6 +1632,17 @@ rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
 	*txq_flags = flags;
 }
 
+static int
+rte_eth_check_tx_queue_offloads(uint64_t tx_queue_offloads,
+				const struct rte_eth_txmode *txmode,
+				const struct rte_eth_dev_info *dev_info)
+{
+	return rte_eth_check_queue_offloads(tx_queue_offloads,
+					    txmode->offloads,
+					    dev_info->tx_queue_offload_capa,
+					    dev_info->tx_offload_capa);
+}
+
 int
 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 		       uint16_t nb_tx_desc, unsigned int socket_id,
@@ -1622,6 +1710,18 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	if (!rte_eth_check_tx_queue_offloads(local_conf.offloads,
+		&dev->data->dev_conf.txmode, &dev_info)) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port %d : Tx queue offloads 0x%"
+			PRIx64 " don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64 "\n",
+			port_id,
+			local_conf.offloads,
+			dev->data->dev_conf.txmode.offloads,
+			dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.9.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads
  2018-03-28  8:57 ` [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads Wei Dai
@ 2018-04-13 17:31   ` Ferruh Yigit
  2018-04-15 10:37     ` Thomas Monjalon
  2018-04-25 11:26   ` [dpdk-dev] [PATCH] " Wei Dai
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 60+ messages in thread
From: Ferruh Yigit @ 2018-04-13 17:31 UTC (permalink / raw)
  To: Wei Dai, thomas; +Cc: dev

On 3/28/2018 9:57 AM, Wei Dai wrote:
> This patch check if a requested offloading
> is supported in the device capability.
> A per port offloading feature should be enabled or
> disabled at same time in both rte_eth_dev_configure( )
> and rte_eth_rx_queue_setup( )/rte_eth_tx_queue_setup( ).
> This patch check if a per port offloading flag has
> same configuration in rte_eth_dev_configure( ) and
> rte_eth_rx_queue_setup( )/rte_eth_tx_queue_setup( ).
> This patch can make such checking in a common way in
> rte_ethdev layer to avoid same checking in underlying PMD.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> 

Hi Wei,

I think it is good idea to move common check to the abstraction layer as much as
possible.

But for this case we are targeting an API change in rc2, I believe better wait
that API change for this update.

Thanks,
ferruh

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads
  2018-04-13 17:31   ` Ferruh Yigit
@ 2018-04-15 10:37     ` Thomas Monjalon
  2018-04-16  3:06       ` Dai, Wei
  0 siblings, 1 reply; 60+ messages in thread
From: Thomas Monjalon @ 2018-04-15 10:37 UTC (permalink / raw)
  To: Ferruh Yigit, Wei Dai; +Cc: dev

13/04/2018 19:31, Ferruh Yigit:
> On 3/28/2018 9:57 AM, Wei Dai wrote:
> > This patch check if a requested offloading
> > is supported in the device capability.
> > A per port offloading feature should be enabled or
> > disabled at same time in both rte_eth_dev_configure( )
> > and rte_eth_rx_queue_setup( )/rte_eth_tx_queue_setup( ).
> > This patch check if a per port offloading flag has
> > same configuration in rte_eth_dev_configure( ) and
> > rte_eth_rx_queue_setup( )/rte_eth_tx_queue_setup( ).
> > This patch can make such checking in a common way in
> > rte_ethdev layer to avoid same checking in underlying PMD.
> 
> I think it is good idea to move common check to the abstraction layer as much as
> possible.
> 
> But for this case we are targeting an API change in rc2, I believe better wait
> that API change for this update.

I think Wei could implement some filtering of offload flags:
If an offload is already enabled at port level, we can filter out them
when enabling again at queue level.
By removing such repetition in ethdev, before calling the PMD op,
the PMD does not need to bother for offloads enabled twice.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads
  2018-04-15 10:37     ` Thomas Monjalon
@ 2018-04-16  3:06       ` Dai, Wei
  0 siblings, 0 replies; 60+ messages in thread
From: Dai, Wei @ 2018-04-16  3:06 UTC (permalink / raw)
  To: Thomas Monjalon, Yigit, Ferruh; +Cc: dev

Thanks, Thomas and Ferruh
I think I can implement v3 for this.

> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Sunday, April 15, 2018 6:37 PM
> To: Yigit, Ferruh <ferruh.yigit@intel.com>; Dai, Wei <wei.dai@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads
> 
> 13/04/2018 19:31, Ferruh Yigit:
> > On 3/28/2018 9:57 AM, Wei Dai wrote:
> > > This patch check if a requested offloading is supported in the
> > > device capability.
> > > A per port offloading feature should be enabled or disabled at same
> > > time in both rte_eth_dev_configure( ) and rte_eth_rx_queue_setup(
> > > )/rte_eth_tx_queue_setup( ).
> > > This patch check if a per port offloading flag has same
> > > configuration in rte_eth_dev_configure( ) and
> > > rte_eth_rx_queue_setup( )/rte_eth_tx_queue_setup( ).
> > > This patch can make such checking in a common way in rte_ethdev
> > > layer to avoid same checking in underlying PMD.
> >
> > I think it is good idea to move common check to the abstraction layer
> > as much as possible.
> >
> > But for this case we are targeting an API change in rc2, I believe
> > better wait that API change for this update.
> 
> I think Wei could implement some filtering of offload flags:
> If an offload is already enabled at port level, we can filter out them when
> enabling again at queue level.
> By removing such repetition in ethdev, before calling the PMD op, the PMD
> does not need to bother for offloads enabled twice.
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH] ethdev: check Rx/Tx offloads
  2018-03-28  8:57 ` [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads Wei Dai
  2018-04-13 17:31   ` Ferruh Yigit
@ 2018-04-25 11:26   ` Wei Dai
  2018-04-25 11:31   ` [dpdk-dev] [PATCH v3] " Wei Dai
  2018-04-25 11:50   ` [dpdk-dev] [PATCH v4] " Wei Dai
  3 siblings, 0 replies; 60+ messages in thread
From: Wei Dai @ 2018-04-25 11:26 UTC (permalink / raw)
  To: thomas, ferruh.yigit, qi.z.zhang; +Cc: dev, Wei Dai

This patch checks if a requested offloading is supported
in the device capability.
Any offloading is disabled by default if it is not set
in rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
A per port offloading can only be enabled in
rte_eth_dev_configure(). If a per port offloading is
sent to rte_eth_[rt]x_queue_setup( ), return error.
Only per queue offloading can be sent to
rte_eth_[rt]x_queue_setup( ). A per queue offloading is
enabled only if it is enabled in rte_eth_dev_configure( ) OR
if it is enabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is enabled in rte_eth_dev_configure(),
it can't be disabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is disabled in rte_eth_dev_configure( ),
it can be enabled or disabled in rte_eth_[rt]x_queue_setup( ).

This patch can make such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

Signed-off-by: Wei Dai <wei.dai@intel.com>

---
v3: rework according to decision of offloading API in community

v2: add offloads checking in rte_eth_dev_configure( ).
    check if a requested offloading is supported.
---
 lib/librte_ether/rte_ethdev.c | 76 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index f0f53d4..70a7904 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offload must be within its device capability */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/*
 	 * Setup new number of RX/TX queues and reconfigure device.
 	 */
@@ -1547,6 +1569,33 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 " doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_rx_queue_setup( )\n",
+				    port_id,
+				    rx_queue_id,
+				    local_conf.offloads,
+				    dev_info.rx_queue_offload_capa);
+		return -EINVAL;
+	}
+
+	/*
+	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled on all queues and can't be disabled here.
+	 * If it is disabled in rte_eth_dev_configure( ), it can be enabled
+	 * or disabled here.
+	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled for all queues here.
+	 */
+	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1681,6 +1730,33 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 " doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_tx_queue_setup( )\n",
+				    port_id,
+				    tx_queue_id,
+				    local_conf.offloads,
+				    dev_info.tx_queue_offload_capa);
+		return -EINVAL;
+	}
+
+	/*
+	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled on all queues and can't be disabled here.
+	 * If it is disabled in rte_eth_dev_configure( ), it can be enabled
+	 * or disabled here.
+	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled for all queues here.
+	 */
+	local_conf.offloads |= dev->data->dev_conf.txmode.offloads;
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v3] ethdev: check Rx/Tx offloads
  2018-03-28  8:57 ` [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads Wei Dai
  2018-04-13 17:31   ` Ferruh Yigit
  2018-04-25 11:26   ` [dpdk-dev] [PATCH] " Wei Dai
@ 2018-04-25 11:31   ` Wei Dai
  2018-04-25 11:49     ` Wei Dai
  2018-04-25 11:50   ` [dpdk-dev] [PATCH v4] " Wei Dai
  3 siblings, 1 reply; 60+ messages in thread
From: Wei Dai @ 2018-04-25 11:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, qi.z.zhang; +Cc: dev, Wei Dai

This patch checks if a requested offloading is supported
in the device capability.
Any offloading is disabled by default if it is not set
in rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
A per port offloading can only be enabled in
rte_eth_dev_configure(). If a per port offloading is
sent to rte_eth_[rt]x_queue_setup( ), return error.
Only per queue offloading can be sent to
rte_eth_[rt]x_queue_setup( ). A per queue offloading is
enabled only if it is enabled in rte_eth_dev_configure( ) OR
if it is enabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is enabled in rte_eth_dev_configure(),
it can't be disabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is disabled in rte_eth_dev_configure( ),
it can be enabled or disabled in rte_eth_[rt]x_queue_setup( ).

This patch can make such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

Signed-off-by: Wei Dai <wei.dai@intel.com>

---
v3: rework according to decision of offloading API in community

v2: add offloads checking in rte_eth_dev_configure( ).
    check if a requested offloading is supported.
---
 lib/librte_ether/rte_ethdev.c | 76 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index f0f53d4..70a7904 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offload must be within its device capability */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/*
 	 * Setup new number of RX/TX queues and reconfigure device.
 	 */
@@ -1547,6 +1569,33 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 " doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_rx_queue_setup( )\n",
+				    port_id,
+				    rx_queue_id,
+				    local_conf.offloads,
+				    dev_info.rx_queue_offload_capa);
+		return -EINVAL;
+	}
+
+	/*
+	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled on all queues and can't be disabled here.
+	 * If it is disabled in rte_eth_dev_configure( ), it can be enabled
+	 * or disabled here.
+	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled for all queues here.
+	 */
+	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1681,6 +1730,33 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 " doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_tx_queue_setup( )\n",
+				    port_id,
+				    tx_queue_id,
+				    local_conf.offloads,
+				    dev_info.tx_queue_offload_capa);
+		return -EINVAL;
+	}
+
+	/*
+	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled on all queues and can't be disabled here.
+	 * If it is disabled in rte_eth_dev_configure( ), it can be enabled
+	 * or disabled here.
+	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled for all queues here.
+	 */
+	local_conf.offloads |= dev->data->dev_conf.txmode.offloads;
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v3] ethdev: check Rx/Tx offloads
  2018-04-25 11:31   ` [dpdk-dev] [PATCH v3] " Wei Dai
@ 2018-04-25 11:49     ` Wei Dai
  0 siblings, 0 replies; 60+ messages in thread
From: Wei Dai @ 2018-04-25 11:49 UTC (permalink / raw)
  To: thomas, ferruh.yigit, qi.z.zhang; +Cc: dev, Wei Dai

This patch checks if a requested offloading is supported
in the device capability.
Any offloading is disabled by default if it is not set
in rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
A per port offloading can only be enabled in
rte_eth_dev_configure(). If a per port offloading is
sent to rte_eth_[rt]x_queue_setup( ), return error.
Only per queue offloading can be sent to
rte_eth_[rt]x_queue_setup( ). A per queue offloading is
enabled only if it is enabled in rte_eth_dev_configure( ) OR
if it is enabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is enabled in rte_eth_dev_configure(),
it can't be disabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is disabled in rte_eth_dev_configure( ),
it can be enabled or disabled in rte_eth_[rt]x_queue_setup( ).

This patch can make such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

Signed-off-by: Wei Dai <wei.dai@intel.com>

---
v3: rework according to decision of offloading API in community

v2: add offloads checking in rte_eth_dev_configure( ).
    check if a requested offloading is supported.
---
 lib/librte_ether/rte_ethdev.c | 76 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index f0f53d4..70a7904 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offload must be within its device capability */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/*
 	 * Setup new number of RX/TX queues and reconfigure device.
 	 */
@@ -1547,6 +1569,33 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 " doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_rx_queue_setup( )\n",
+				    port_id,
+				    rx_queue_id,
+				    local_conf.offloads,
+				    dev_info.rx_queue_offload_capa);
+		return -EINVAL;
+	}
+
+	/*
+	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled on all queues and can't be disabled here.
+	 * If it is disabled in rte_eth_dev_configure( ), it can be enabled
+	 * or disabled here.
+	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled for all queues here.
+	 */
+	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1681,6 +1730,33 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 " doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_tx_queue_setup( )\n",
+				    port_id,
+				    tx_queue_id,
+				    local_conf.offloads,
+				    dev_info.tx_queue_offload_capa);
+		return -EINVAL;
+	}
+
+	/*
+	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled on all queues and can't be disabled here.
+	 * If it is disabled in rte_eth_dev_configure( ), it can be enabled
+	 * or disabled here.
+	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled for all queues here.
+	 */
+	local_conf.offloads |= dev->data->dev_conf.txmode.offloads;
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v4] ethdev: check Rx/Tx offloads
  2018-03-28  8:57 ` [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads Wei Dai
                     ` (2 preceding siblings ...)
  2018-04-25 11:31   ` [dpdk-dev] [PATCH v3] " Wei Dai
@ 2018-04-25 11:50   ` Wei Dai
  2018-04-25 17:04     ` Ferruh Yigit
  2018-04-26 14:37     ` [dpdk-dev] [PATCH v5] " Wei Dai
  3 siblings, 2 replies; 60+ messages in thread
From: Wei Dai @ 2018-04-25 11:50 UTC (permalink / raw)
  To: thomas, ferruh.yigit, qi.z.zhang; +Cc: dev, Wei Dai

This patch checks if a requested offloading is supported
in the device capability.
Any offloading is disabled by default if it is not set
in rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
A per port offloading can only be enabled in
rte_eth_dev_configure(). If a per port offloading is
sent to rte_eth_[rt]x_queue_setup( ), return error.
Only per queue offloading can be sent to
rte_eth_[rt]x_queue_setup( ). A per queue offloading is
enabled only if it is enabled in rte_eth_dev_configure( ) OR
if it is enabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is enabled in rte_eth_dev_configure(),
it can't be disabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is disabled in rte_eth_dev_configure( ),
it can be enabled or disabled in rte_eth_[rt]x_queue_setup( ).

This patch can make such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

Signed-off-by: Wei Dai <wei.dai@intel.com>

---
v4: fix a wrong description in git log message.

v3: rework according to decision of offloading API in community

v2: add offloads checking in rte_eth_dev_configure( ).
    check if a requested offloading is supported.
---
 lib/librte_ether/rte_ethdev.c | 76 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index f0f53d4..70a7904 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offload must be within its device capability */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/*
 	 * Setup new number of RX/TX queues and reconfigure device.
 	 */
@@ -1547,6 +1569,33 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 "doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_rx_queue_setup( )\n",
+				    port_id,
+				    rx_queue_id,
+				    local_conf.offloads,
+				    dev_info.rx_queue_offload_capa);
+		return -EINVAL;
+	}
+
+	/*
+	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled on all queues and can't be disabled here.
+	 * If it is disabled in rte_eth_dev_configure( ), it can be enabled
+	 * or disabled here.
+	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled for all queues here.
+	 */
+	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1681,6 +1730,33 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 "doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_tx_queue_setup( )\n",
+				    port_id,
+				    tx_queue_id,
+				    local_conf.offloads,
+				    dev_info.tx_queue_offload_capa);
+		return -EINVAL;
+	}
+
+	/*
+	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled on all queues and can't be disabled here.
+	 * If it is disabled in rte_eth_dev_configure( ), it can be enabled
+	 * or disabled here.
+	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
+	 * it is also enabled for all queues here.
+	 */
+	local_conf.offloads |= dev->data->dev_conf.txmode.offloads;
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v4] ethdev: check Rx/Tx offloads
  2018-04-25 11:50   ` [dpdk-dev] [PATCH v4] " Wei Dai
@ 2018-04-25 17:04     ` Ferruh Yigit
  2018-04-26  7:59       ` Zhang, Qi Z
  2018-04-26 14:37     ` [dpdk-dev] [PATCH v5] " Wei Dai
  1 sibling, 1 reply; 60+ messages in thread
From: Ferruh Yigit @ 2018-04-25 17:04 UTC (permalink / raw)
  To: Wei Dai, thomas, qi.z.zhang; +Cc: dev

On 4/25/2018 12:50 PM, Wei Dai wrote:
> This patch check if a requested offloading is supported
> in the device capability.
> Any offloading is disabled by default if it is not set
> in rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
> A per port offloading can only be enabled in
> rte_eth_dev_configure(). If a per port offloading is
> sent to rte_eth_[rt]x_queue_setup( ), return error.
> Only per queue offloading can be sent to
> rte_eth_[rt]x_queue_setup( ). A per queue offloading is
> enabled only if it is enabled in rte_eth_dev_configure( ) OR
> if it is enabled in rte_eth_[rt]x_queue_setup( ).
> If a per queue offloading is enabled in rte_eth_dev_configure(),
> it can't be disabled in rte_eth_[rt]x_queue_setup( ).
> If a per queue offloading is disabled in rte_eth_dev_configure( ),
> it can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).
> 
> This patch can make such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.

Hi Wei,

For clarification, there is existing API for rc1, and there is a suggested
update in API for rc2. I guess this patch is for suggested update in rc2?

> Signed-off-by: Wei Dai <wei.dai@intel.com>
> 
> ---
> v4: fix a wrong description in git log message.
> 
> v3: rework according to dicision of offloading API in community
> 
> v2: add offloads checking in rte_eth_dev_configure( ).
>     check if a requested offloading is supported.
> ---
>  lib/librte_ether/rte_ethdev.c | 76 +++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 76 insertions(+)
> 
> diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
> index f0f53d4..70a7904 100644
> --- a/lib/librte_ether/rte_ethdev.c
> +++ b/lib/librte_ether/rte_ethdev.c
> @@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>  							ETHER_MAX_LEN;
>  	}
>  
> +	/* Any requested offload must be within its device capability */
> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> +	     local_conf.rxmode.offloads) {
> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
> +				    "0x%" PRIx64 " doesn't match Rx offloads "
> +				    "capability 0x%" PRIx64 "\n",
> +				    port_id,
> +				    local_conf.rxmode.offloads,
> +				    dev_info.rx_offload_capa);
> +		return -EINVAL;
> +	}
> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> +	     local_conf.txmode.offloads) {
> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
> +				    "0x%" PRIx64 " doesn't match Tx offloads "
> +				    "capability 0x%" PRIx64 "\n",
> +				    port_id,
> +				    local_conf.txmode.offloads,
> +				    dev_info.tx_offload_capa);
> +		return -EINVAL;
> +	}
+1 having these checks here.

> +
>  	/*
>  	 * Setup new number of RX/TX queues and reconfigure device.
>  	 */
> @@ -1547,6 +1569,33 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>  						    &local_conf.offloads);
>  	}
>  
> +	/*
> +	 * Only per-queue offload can be enabled from application.
> +	 * If any pure per-port offload is sent to this function, return -EINVAL
> +	 */
> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> +	     local_conf.offloads) {
> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
> +				    "Requested offload 0x%" PRIx64 "doesn't "
> +				    "match per-queue capability 0x%" PRIx64
> +				    " in rte_eth_rx_queue_setup( )\n",
> +				    port_id,
> +				    rx_queue_id,
> +				    local_conf.offloads,
> +				    dev_info.rx_queue_offload_capa);
> +		return -EINVAL;
> +	}

There is a change here. If requested offload is already enabled in port level,
instead of returning error, ignore it.
So this removes the restriction for apps that "only an offload from queue
capabilities can be send for queue_setup() functions". This is not requirement
for application as it has been before, but this is allowed for app now.

If app tried to enable a port offload in queue level that is not already
enabled, it should still return error.

> +
> +	/*
> +	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
> +	 * it is also enabled on all queues and can't be disabled here.
> +	 * If it is diabled in rte_eth_dev_configure( ), it can be enabled
> +	 * or disabled here.
> +	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
> +	 * it is also enabled for all queues here.
> +	 */
> +	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;

I didn't get this one, why add rxmode.offloads into queue offloads?

Based on above change Thomas has an suggestion [1]:

"
In the case of offload already enabled at port level
and repeated in queue setup,
ethdev can avoid passing it to the PMD queue setup function.
"

So almost reverse of what you are doing, strip rxmode.offloads from
local_conf.offloads for PMDs. What do you think?


[1]
https://dpdk.org/ml/archives/dev/2018-April/098956.html

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v4] ethdev: check Rx/Tx offloads
  2018-04-25 17:04     ` Ferruh Yigit
@ 2018-04-26  7:59       ` Zhang, Qi Z
  2018-04-26  8:18         ` Thomas Monjalon
  0 siblings, 1 reply; 60+ messages in thread
From: Zhang, Qi Z @ 2018-04-26  7:59 UTC (permalink / raw)
  To: Yigit, Ferruh, Dai, Wei, thomas; +Cc: dev



> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Thursday, April 26, 2018 1:05 AM
> To: Dai, Wei <wei.dai@intel.com>; thomas@monjalon.net; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH v4] ethdev: check Rx/Tx offloads
> 
> On 4/25/2018 12:50 PM, Wei Dai wrote:
> > This patch check if a requested offloading is supported in the device
> > capability.
> > Any offloading is disabled by default if it is not set in
> > rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
> > A per port offloading can only be enabled in rte_eth_dev_configure().
> > If a per port offloading is sent to rte_eth_[rt]x_queue_setup( ),
> > return error.
> > Only per queue offloading can be sent to rte_eth_[rt]x_queue_setup( ).
> > A per queue offloading is enabled only if it is enabled in
> > rte_eth_dev_configure( ) OR if it is enabled in
> > rte_eth_[rt]x_queue_setup( ).
> > If a per queue offloading is enabled in rte_eth_dev_configure(), it
> > can't be disabled in rte_eth_[rt]x_queue_setup( ).
> > If a per queue offloading is disabled in rte_eth_dev_configure( ), it
> > can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).
> >
> > This patch can make such checking in a common way in rte_ethdev layer
> > to avoid same checking in underlying PMD.
> 
> Hi Wei,
> 
> For clarification, there is existing API for rc1, and there is a suggested update
> in API for rc2. I guess this patch is for suggested update in rc2?
> 
> > Signed-off-by: Wei Dai <wei.dai@intel.com>
> >
> > ---
> > v4: fix a wrong description in git log message.
> >
> > v3: rework according to dicision of offloading API in community
> >
> > v2: add offloads checking in rte_eth_dev_configure( ).
> >     check if a requested offloading is supported.
> > ---
> >  lib/librte_ether/rte_ethdev.c | 76
> > +++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 76 insertions(+)
> >
> > diff --git a/lib/librte_ether/rte_ethdev.c
> > b/lib/librte_ether/rte_ethdev.c index f0f53d4..70a7904 100644
> > --- a/lib/librte_ether/rte_ethdev.c
> > +++ b/lib/librte_ether/rte_ethdev.c
> > @@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id,
> uint16_t nb_rx_q, uint16_t nb_tx_q,
> >  							ETHER_MAX_LEN;
> >  	}
> >
> > +	/* Any requested offload must be within its device capability */
> > +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> > +	     local_conf.rxmode.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx
> offloads "
> > +				    "0x%" PRIx64 " doesn't match Rx offloads "
> > +				    "capability 0x%" PRIx64 "\n",
> > +				    port_id,
> > +				    local_conf.rxmode.offloads,
> > +				    dev_info.rx_offload_capa);
> > +		return -EINVAL;
> > +	}
> > +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> > +	     local_conf.txmode.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx
> offloads "
> > +				    "0x%" PRIx64 " doesn't match Tx offloads "
> > +				    "capability 0x%" PRIx64 "\n",
> > +				    port_id,
> > +				    local_conf.txmode.offloads,
> > +				    dev_info.tx_offload_capa);
> > +		return -EINVAL;
> > +	}
> +1 having these checks here.
> 
> > +
> >  	/*
> >  	 * Setup new number of RX/TX queues and reconfigure device.
> >  	 */
> > @@ -1547,6 +1569,33 @@ rte_eth_rx_queue_setup(uint16_t port_id,
> uint16_t rx_queue_id,
> >  						    &local_conf.offloads);
> >  	}
> >
> > +	/*
> > +	 * Only per-queue offload can be enabled from application.
> > +	 * If any pure per-port offload is sent to this function, return -EINVAL
> > +	 */
> > +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> > +	     local_conf.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
> > +				    "Requested offload 0x%" PRIx64 "doesn't "
> > +				    "match per-queue capability 0x%" PRIx64
> > +				    " in rte_eth_rx_queue_setup( )\n",
> > +				    port_id,
> > +				    rx_queue_id,
> > +				    local_conf.offloads,
> > +				    dev_info.rx_queue_offload_capa);
> > +		return -EINVAL;
> > +	}
> 
> There is a change here. If requested offload is already enabled in port level,
> instead of returning error, ignore it.
> So this removes the restriction for apps that "only an offload from queue
> capabilities can be send for queue_setup() functions". This is not
> requirement for application as it has been before, but this is allowed for app
> now.
> 
> If app tried to enable a port offload in queue level that is not already enabled,
> it should still return error.
> 
> > +
> > +	/*
> > +	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
> > +	 * it is also enabled on all queues and can't be disabled here.
> > +	 * If it is diabled in rte_eth_dev_configure( ), it can be enabled
> > +	 * or disabled here.
> > +	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
> > +	 * it is also enabled for all queues here.
> > +	 */
> > +	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
> 
> I didn't get this one, why add rxmode.offloads into queue offloads?
> 
> Based on above change Thomas has an suggestion [1]:
> 
> "
> In the case of offload already enabled at port level and repeated in queue
> setup, ethdev can avoid passing it to the PMD queue setup function.
> "
> 
> So almost reverse of what you are doing, strip rxmode.offloads from
> local_conf.offloads for PMDs. What do you think?

Should we do like below
	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
	local_conf.offloads &= dev_info.rx_queue_offload_capa

I think it's better to only strip port offloads. But keep all queue offload,
 since this is exact we going to configure the queue and during device start, it can simply iterate on each bit on local_conf.offloads to
turn on queue offload and don't need to worry about rxmode.offloads.

Regards
Qi.

> 
> 
> [1]
> https://dpdk.org/ml/archives/dev/2018-April/098956.html

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v4] ethdev: check Rx/Tx offloads
  2018-04-26  7:59       ` Zhang, Qi Z
@ 2018-04-26  8:18         ` Thomas Monjalon
  2018-04-26  8:51           ` Zhang, Qi Z
  0 siblings, 1 reply; 60+ messages in thread
From: Thomas Monjalon @ 2018-04-26  8:18 UTC (permalink / raw)
  To: Zhang, Qi Z; +Cc: Yigit, Ferruh, Dai, Wei, dev

26/04/2018 09:59, Zhang, Qi Z:
> 
> > -----Original Message-----
> > From: Yigit, Ferruh
> > Sent: Thursday, April 26, 2018 1:05 AM
> > To: Dai, Wei <wei.dai@intel.com>; thomas@monjalon.net; Zhang, Qi Z
> > <qi.z.zhang@intel.com>
> > Cc: dev@dpdk.org
> > Subject: Re: [PATCH v4] ethdev: check Rx/Tx offloads
> > 
> > On 4/25/2018 12:50 PM, Wei Dai wrote:
> > > This patch check if a requested offloading is supported in the device
> > > capability.
> > > Any offloading is disabled by default if it is not set in
> > > rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
> > > A per port offloading can only be enabled in rte_eth_dev_configure().
> > > If a per port offloading is sent to rte_eth_[rt]x_queue_setup( ),
> > > return error.
> > > Only per queue offloading can be sent to rte_eth_[rt]x_queue_setup( ).
> > > A per queue offloading is enabled only if it is enabled in
> > > rte_eth_dev_configure( ) OR if it is enabled in
> > > rte_eth_[rt]x_queue_setup( ).
> > > If a per queue offloading is enabled in rte_eth_dev_configure(), it
> > > can't be disabled in rte_eth_[rt]x_queue_setup( ).
> > > If a per queue offloading is disabled in rte_eth_dev_configure( ), it
> > > can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).
> > >
> > > This patch can make such checking in a common way in rte_ethdev layer
> > > to avoid same checking in underlying PMD.
> > 
> > Hi Wei,
> > 
> > For clarification, there is existing API for rc1, and there is a suggested update
> > in API for rc2. I guess this patch is for suggested update in rc2?
> > 
> > > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > >
> > > ---
> > > v4: fix a wrong description in git log message.
> > >
> > > v3: rework according to dicision of offloading API in community
> > >
> > > v2: add offloads checking in rte_eth_dev_configure( ).
> > >     check if a requested offloading is supported.
> > > ---
> > >  lib/librte_ether/rte_ethdev.c | 76
> > > +++++++++++++++++++++++++++++++++++++++++++
> > >  1 file changed, 76 insertions(+)
> > >
> > > diff --git a/lib/librte_ether/rte_ethdev.c
> > > b/lib/librte_ether/rte_ethdev.c index f0f53d4..70a7904 100644
> > > --- a/lib/librte_ether/rte_ethdev.c
> > > +++ b/lib/librte_ether/rte_ethdev.c
> > > @@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id,
> > uint16_t nb_rx_q, uint16_t nb_tx_q,
> > >  							ETHER_MAX_LEN;
> > >  	}
> > >
> > > +	/* Any requested offload must be within its device capability */
> > > +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> > > +	     local_conf.rxmode.offloads) {
> > > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx
> > offloads "
> > > +				    "0x%" PRIx64 " doesn't match Rx offloads "
> > > +				    "capability 0x%" PRIx64 "\n",
> > > +				    port_id,
> > > +				    local_conf.rxmode.offloads,
> > > +				    dev_info.rx_offload_capa);
> > > +		return -EINVAL;
> > > +	}
> > > +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> > > +	     local_conf.txmode.offloads) {
> > > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx
> > offloads "
> > > +				    "0x%" PRIx64 " doesn't match Tx offloads "
> > > +				    "capability 0x%" PRIx64 "\n",
> > > +				    port_id,
> > > +				    local_conf.txmode.offloads,
> > > +				    dev_info.tx_offload_capa);
> > > +		return -EINVAL;
> > > +	}
> > +1 having these checks here.
> > 
> > > +
> > >  	/*
> > >  	 * Setup new number of RX/TX queues and reconfigure device.
> > >  	 */
> > > @@ -1547,6 +1569,33 @@ rte_eth_rx_queue_setup(uint16_t port_id,
> > uint16_t rx_queue_id,
> > >  						    &local_conf.offloads);
> > >  	}
> > >
> > > +	/*
> > > +	 * Only per-queue offload can be enabled from application.
> > > +	 * If any pure per-port offload is sent to this function, return -EINVAL
> > > +	 */
> > > +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> > > +	     local_conf.offloads) {
> > > +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
> > > +				    "Requested offload 0x%" PRIx64 "doesn't "
> > > +				    "match per-queue capability 0x%" PRIx64
> > > +				    " in rte_eth_rx_queue_setup( )\n",
> > > +				    port_id,
> > > +				    rx_queue_id,
> > > +				    local_conf.offloads,
> > > +				    dev_info.rx_queue_offload_capa);
> > > +		return -EINVAL;
> > > +	}
> > 
> > There is a change here. If requested offload is already enabled in port level,
> > instead of returning error, ignore it.
> > So this removes the restriction for apps that "only an offload from queue
> > capabilities can be send for queue_setup() functions". This is not
> > requirement for application as it has been before, but this is allowed for app
> > now.
> > 
> > If app tried to enable a port offload in queue level that is not already enabled,
> > it should still return error.
> > 
> > > +
> > > +	/*
> > > +	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
> > > +	 * it is also enabled on all queues and can't be disabled here.
> > > +	 * If it is diabled in rte_eth_dev_configure( ), it can be enabled
> > > +	 * or disabled here.
> > > +	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
> > > +	 * it is also enabled for all queues here.
> > > +	 */
> > > +	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
> > 
> > I didn't get this one, why add rxmode.offloads into queue offloads?
> > 
> > Based on above change Thomas has an suggestion [1]:
> > 
> > "
> > In the case of offload already enabled at port level and repeated in queue
> > setup, ethdev can avoid passing it to the PMD queue setup function.
> > "
> > 
> > So almost reverse of what you are doing, strip rxmode.offloads from
> > local_conf.offloads for PMDs. What do you think?
> 
> Should we do like below
> 	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
> 	local_conf.offloads &= dev_info.rx_queue_offload_capa
> 
> I thinks it's better to only strip port offloads. But keep all queue offload,
>  since this is exact we going to configure the queue and during device start, it can simply iterate on each bit on local_conf.offloads to
> turn on queue offload and don't need to worry about rxmode.offloads.

No
The offloads which are already enabled at port level does not need to be
enabled again at queue level.
But the PMD can decide to not configure the offload at port level for real,
and configure the port offloads in every queue setups.
It is an implementation choice, and can be different per-offload.
So it is simpler to filter such request for queue setups.
This way, we will be sure that all offloads, requested in queue setup PMD
function, must be setup for the queue.
The PMD implementation will need to setup all the requested offloads
in queue setup, plus the port offloads which were deferred to all queues.

Hope it's clear.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v4] ethdev: check Rx/Tx offloads
  2018-04-26  8:18         ` Thomas Monjalon
@ 2018-04-26  8:51           ` Zhang, Qi Z
  2018-04-26 14:45             ` Dai, Wei
  0 siblings, 1 reply; 60+ messages in thread
From: Zhang, Qi Z @ 2018-04-26  8:51 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: Yigit, Ferruh, Dai, Wei, dev



> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Thursday, April 26, 2018 4:19 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: Yigit, Ferruh <ferruh.yigit@intel.com>; Dai, Wei <wei.dai@intel.com>;
> dev@dpdk.org
> Subject: Re: [PATCH v4] ethdev: check Rx/Tx offloads
> 
> 26/04/2018 09:59, Zhang, Qi Z:
> >
> > > -----Original Message-----
> > > From: Yigit, Ferruh
> > > Sent: Thursday, April 26, 2018 1:05 AM
> > > To: Dai, Wei <wei.dai@intel.com>; thomas@monjalon.net; Zhang, Qi Z
> > > <qi.z.zhang@intel.com>
> > > Cc: dev@dpdk.org
> > > Subject: Re: [PATCH v4] ethdev: check Rx/Tx offloads
> > >
> > > On 4/25/2018 12:50 PM, Wei Dai wrote:
> > > > This patch check if a requested offloading is supported in the
> > > > device capability.
> > > > Any offloading is disabled by default if it is not set in
> > > > rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
> > > > A per port offloading can only be enabled in rte_eth_dev_configure().
> > > > If a per port offloading is sent to rte_eth_[rt]x_queue_setup( ),
> > > > return error.
> > > > Only per queue offloading can be sent to rte_eth_[rt]x_queue_setup( ).
> > > > A per queue offloading is enabled only if it is enabled in
> > > > rte_eth_dev_configure( ) OR if it is enabled in
> > > > rte_eth_[rt]x_queue_setup( ).
> > > > If a per queue offloading is enabled in rte_eth_dev_configure(),
> > > > it can't be disabled in rte_eth_[rt]x_queue_setup( ).
> > > > If a per queue offloading is disabled in rte_eth_dev_configure( ),
> > > > it can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).
> > > >
> > > > This patch can make such checking in a common way in rte_ethdev
> > > > layer to avoid same checking in underlying PMD.
> > >
> > > Hi Wei,
> > >
> > > For clarification, there is existing API for rc1, and there is a
> > > suggested update in API for rc2. I guess this patch is for suggested update
> in rc2?
> > >
> > > > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > > >
> > > > ---
> > > > v4: fix a wrong description in git log message.
> > > >
> > > > v3: rework according to dicision of offloading API in community
> > > >
> > > > v2: add offloads checking in rte_eth_dev_configure( ).
> > > >     check if a requested offloading is supported.
> > > > ---
> > > >  lib/librte_ether/rte_ethdev.c | 76
> > > > +++++++++++++++++++++++++++++++++++++++++++
> > > >  1 file changed, 76 insertions(+)
> > > >
> > > > diff --git a/lib/librte_ether/rte_ethdev.c
> > > > b/lib/librte_ether/rte_ethdev.c index f0f53d4..70a7904 100644
> > > > --- a/lib/librte_ether/rte_ethdev.c
> > > > +++ b/lib/librte_ether/rte_ethdev.c
> > > > @@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id,
> > > uint16_t nb_rx_q, uint16_t nb_tx_q,
> > > >  							ETHER_MAX_LEN;
> > > >  	}
> > > >
> > > > +	/* Any requested offload must be within its device capability */
> > > > +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> > > > +	     local_conf.rxmode.offloads) {
> > > > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx
> > > offloads "
> > > > +				    "0x%" PRIx64 " doesn't match Rx offloads "
> > > > +				    "capability 0x%" PRIx64 "\n",
> > > > +				    port_id,
> > > > +				    local_conf.rxmode.offloads,
> > > > +				    dev_info.rx_offload_capa);
> > > > +		return -EINVAL;
> > > > +	}
> > > > +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> > > > +	     local_conf.txmode.offloads) {
> > > > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx
> > > offloads "
> > > > +				    "0x%" PRIx64 " doesn't match Tx offloads "
> > > > +				    "capability 0x%" PRIx64 "\n",
> > > > +				    port_id,
> > > > +				    local_conf.txmode.offloads,
> > > > +				    dev_info.tx_offload_capa);
> > > > +		return -EINVAL;
> > > > +	}
> > > +1 having these checks here.
> > >
> > > > +
> > > >  	/*
> > > >  	 * Setup new number of RX/TX queues and reconfigure device.
> > > >  	 */
> > > > @@ -1547,6 +1569,33 @@ rte_eth_rx_queue_setup(uint16_t port_id,
> > > uint16_t rx_queue_id,
> > > >  						    &local_conf.offloads);
> > > >  	}
> > > >
> > > > +	/*
> > > > +	 * Only per-queue offload can be enabled from application.
> > > > +	 * If any pure per-port offload is sent to this function, return
> -EINVAL
> > > > +	 */
> > > > +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> > > > +	     local_conf.offloads) {
> > > > +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> rx_queue_id=%d "
> > > > +				    "Requested offload 0x%" PRIx64 "doesn't "
> > > > +				    "match per-queue capability 0x%" PRIx64
> > > > +				    " in rte_eth_rx_queue_setup( )\n",
> > > > +				    port_id,
> > > > +				    rx_queue_id,
> > > > +				    local_conf.offloads,
> > > > +				    dev_info.rx_queue_offload_capa);
> > > > +		return -EINVAL;
> > > > +	}
> > >
> > > There is a change here. If requested offload is already enabled in
> > > port level, instead of returning error, ignore it.
> > > So this removes the restriction for apps that "only an offload from
> > > queue capabilities can be send for queue_setup() functions". This is
> > > not requirement for application as it has been before, but this is
> > > allowed for app now.
> > >
> > > If app tried to enable a port offload in queue level that is not
> > > already enabled, it should still return error.
> > >
> > > > +
> > > > +	/*
> > > > +	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
> > > > +	 * it is also enabled on all queues and can't be disabled here.
> > > > +	 * If it is diabled in rte_eth_dev_configure( ), it can be enabled
> > > > +	 * or disabled here.
> > > > +	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
> > > > +	 * it is also enabled for all queues here.
> > > > +	 */
> > > > +	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
> > >
> > > I didn't get this one, why add rxmode.offloads into queue offloads?
> > >
> > > Based on above change Thomas has an suggestion [1]:
> > >
> > > "
> > > In the case of offload already enabled at port level and repeated in
> > > queue setup, ethdev can avoid passing it to the PMD queue setup
> function.
> > > "
> > >
> > > So almost reverse of what you are doing, strip rxmode.offloads from
> > > local_conf.offloads for PMDs. What do you think?
> >
> > Should we do like below
> > 	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
> > 	local_conf.offloads &= dev_info.rx_queue_offload_capa
> >
> > I thinks it's better to only strip port offloads. But keep all queue
> > offload,  since this is exact we going to configure the queue and
> > during device start, it can simply iterate on each bit on local_conf.offloads
> to turn on queue offload and don't need to worry about rxmode.offloads.
> 
> No
> The offloads which are already enabled at port level does not need to be
> enabled again at queue level.
> But the PMD can decide to not configure the offload at port level for real,
> and configure the port offloads in every queue setups.
> It is an implementation choice, and can be different per-offload.

OK, got your point, that make sense.

> So it is simpler to filter such request for queue setups.
> This way, we will be sure that all offloads, requested in queue setup PMD
> function, must be setup for the queue.
> The PMD implementation will need to setup all the requested offloads in
> queue setup, plus the port offloads which were deferred to all queues.
> 
> Hope it's clear.
> 
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v5] ethdev: check Rx/Tx offloads
  2018-04-25 11:50   ` [dpdk-dev] [PATCH v4] " Wei Dai
  2018-04-25 17:04     ` Ferruh Yigit
@ 2018-04-26 14:37     ` Wei Dai
  2018-04-26 15:50       ` Ferruh Yigit
  2018-05-03  1:30       ` [dpdk-dev] [PATCH v6] " Wei Dai
  1 sibling, 2 replies; 60+ messages in thread
From: Wei Dai @ 2018-04-26 14:37 UTC (permalink / raw)
  To: ferruh.yigit, thomas, qi.z.zhang; +Cc: dev, Wei Dai

This patch checks if a requested offloading is supported
in the device capability.
Any offloading is disabled by default if it is not set
in rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
A per port offloading can only be enabled in
rte_eth_dev_configure(). If a per port offloading is
sent to rte_eth_[rt]x_queue_setup( ), return error.
Only per queue offloading can be sent to
rte_eth_[rt]x_queue_setup( ). A per queue offloading is
enabled only if it is enabled in rte_eth_dev_configure( ) OR
if it is enabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is enabled in rte_eth_dev_configure(),
it can't be disabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is disabled in rte_eth_dev_configure( ),
it can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).

This patch can make such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 lib/librte_ether/rte_ethdev.c | 56 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index f0f53d4..5485f47 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offload must be within its device capability */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/*
 	 * Setup new number of RX/TX queues and reconfigure device.
 	 */
@@ -1547,6 +1569,23 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 "doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_rx_queue_setup( )\n",
+				    port_id,
+				    rx_queue_id,
+				    local_conf.offloads,
+				    dev_info.rx_queue_offload_capa);
+		return -EINVAL;
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1681,6 +1720,23 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from applcation.
+	 * If any pure per-port offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 "doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in rte_eth_tx_queue_setup( )\n",
+				    port_id,
+				    tx_queue_id,
+				    local_conf.offloads,
+				    dev_info.tx_queue_offload_capa);
+		return -EINVAL;
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v4] ethdev: check Rx/Tx offloads
  2018-04-26  8:51           ` Zhang, Qi Z
@ 2018-04-26 14:45             ` Dai, Wei
  0 siblings, 0 replies; 60+ messages in thread
From: Dai, Wei @ 2018-04-26 14:45 UTC (permalink / raw)
  To: Zhang, Qi Z, Thomas Monjalon; +Cc: Yigit, Ferruh, dev

Thanks to Thomas, Ferruh and Zhang Qi for your feedback.
I will rework v5 patch to follow your guidance.

> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Thursday, April 26, 2018 4:51 PM
> To: Thomas Monjalon <thomas@monjalon.net>
> Cc: Yigit, Ferruh <ferruh.yigit@intel.com>; Dai, Wei <wei.dai@intel.com>;
> dev@dpdk.org
> Subject: RE: [PATCH v4] ethdev: check Rx/Tx offloads
> 
> 
> 
> > -----Original Message-----
> > From: Thomas Monjalon [mailto:thomas@monjalon.net]
> > Sent: Thursday, April 26, 2018 4:19 PM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>
> > Cc: Yigit, Ferruh <ferruh.yigit@intel.com>; Dai, Wei
> > <wei.dai@intel.com>; dev@dpdk.org
> > Subject: Re: [PATCH v4] ethdev: check Rx/Tx offloads
> >
> > 26/04/2018 09:59, Zhang, Qi Z:
> > >
> > > > -----Original Message-----
> > > > From: Yigit, Ferruh
> > > > Sent: Thursday, April 26, 2018 1:05 AM
> > > > To: Dai, Wei <wei.dai@intel.com>; thomas@monjalon.net; Zhang, Qi Z
> > > > <qi.z.zhang@intel.com>
> > > > Cc: dev@dpdk.org
> > > > Subject: Re: [PATCH v4] ethdev: check Rx/Tx offloads
> > > >
> > > > On 4/25/2018 12:50 PM, Wei Dai wrote:
> > > > > This patch check if a requested offloading is supported in the
> > > > > device capability.
> > > > > Any offloading is disabled by default if it is not set in
> > > > > rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
> > > > > A per port offloading can only be enabled in rte_eth_dev_configure().
> > > > > If a per port offloading is sent to rte_eth_[rt]x_queue_setup(
> > > > > ), return error.
> > > > > Only per queue offloading can be sent to
> rte_eth_[rt]x_queue_setup( ).
> > > > > A per queue offloading is enabled only if it is enabled in
> > > > > rte_eth_dev_configure( ) OR if it is enabled in
> > > > > rte_eth_[rt]x_queue_setup( ).
> > > > > If a per queue offloading is enabled in rte_eth_dev_configure(),
> > > > > it can't be disabled in rte_eth_[rt]x_queue_setup( ).
> > > > > If a per queue offloading is disabled in rte_eth_dev_configure(
> > > > > ), it can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).
> > > > >
> > > > > This patch can make such checking in a common way in rte_ethdev
> > > > > layer to avoid same checking in underlying PMD.
> > > >
> > > > Hi Wei,
> > > >
> > > > For clarification, there is existing API for rc1, and there is a
> > > > suggested update in API for rc2. I guess this patch is for
> > > > suggested update
> > in rc2?
> > > >
> > > > > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > > > >
> > > > > ---
> > > > > v4: fix a wrong description in git log message.
> > > > >
> > > > > v3: rework according to dicision of offloading API in community
> > > > >
> > > > > v2: add offloads checking in rte_eth_dev_configure( ).
> > > > >     check if a requested offloading is supported.
> > > > > ---
> > > > >  lib/librte_ether/rte_ethdev.c | 76
> > > > > +++++++++++++++++++++++++++++++++++++++++++
> > > > >  1 file changed, 76 insertions(+)
> > > > >
> > > > > diff --git a/lib/librte_ether/rte_ethdev.c
> > > > > b/lib/librte_ether/rte_ethdev.c index f0f53d4..70a7904 100644
> > > > > --- a/lib/librte_ether/rte_ethdev.c
> > > > > +++ b/lib/librte_ether/rte_ethdev.c
> > > > > @@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id,
> > > > uint16_t nb_rx_q, uint16_t nb_tx_q,
> > > > >  							ETHER_MAX_LEN;
> > > > >  	}
> > > > >
> > > > > +	/* Any requested offload must be within its device capability */
> > > > > +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> > > > > +	     local_conf.rxmode.offloads) {
> > > > > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx
> > > > offloads "
> > > > > +				    "0x%" PRIx64 " doesn't match Rx offloads "
> > > > > +				    "capability 0x%" PRIx64 "\n",
> > > > > +				    port_id,
> > > > > +				    local_conf.rxmode.offloads,
> > > > > +				    dev_info.rx_offload_capa);
> > > > > +		return -EINVAL;
> > > > > +	}
> > > > > +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> > > > > +	     local_conf.txmode.offloads) {
> > > > > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx
> > > > offloads "
> > > > > +				    "0x%" PRIx64 " doesn't match Tx offloads "
> > > > > +				    "capability 0x%" PRIx64 "\n",
> > > > > +				    port_id,
> > > > > +				    local_conf.txmode.offloads,
> > > > > +				    dev_info.tx_offload_capa);
> > > > > +		return -EINVAL;
> > > > > +	}
> > > > +1 having these checks here.
> > > >
> > > > > +
> > > > >  	/*
> > > > >  	 * Setup new number of RX/TX queues and reconfigure device.
> > > > >  	 */
> > > > > @@ -1547,6 +1569,33 @@ rte_eth_rx_queue_setup(uint16_t
> port_id,
> > > > uint16_t rx_queue_id,
> > > > >  						    &local_conf.offloads);
> > > > >  	}
> > > > >
> > > > > +	/*
> > > > > +	 * Only per-queue offload can be enabled from application.
> > > > > +	 * If any pure per-port offload is sent to this function,
> > > > > +return
> > -EINVAL
> > > > > +	 */
> > > > > +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> > > > > +	     local_conf.offloads) {
> > > > > +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> > rx_queue_id=%d "
> > > > > +				    "Requested offload 0x%" PRIx64 "doesn't "
> > > > > +				    "match per-queue capability 0x%" PRIx64
> > > > > +				    " in rte_eth_rx_queue_setup( )\n",
> > > > > +				    port_id,
> > > > > +				    rx_queue_id,
> > > > > +				    local_conf.offloads,
> > > > > +				    dev_info.rx_queue_offload_capa);
> > > > > +		return -EINVAL;
> > > > > +	}
> > > >
> > > > There is a change here. If requested offload is already enabled in
> > > > port level, instead of returning error, ignore it.
> > > > So this removes the restriction for apps that "only an offload
> > > > from queue capabilities can be send for queue_setup() functions".
> > > > This is not requirement for application as it has been before, but
> > > > this is allowed for app now.
> > > >
> > > > If app tried to enable a port offload in queue level that is not
> > > > already enabled, it should still return error.
> > > >
> > > > > +
> > > > > +	/*
> > > > > +	 * If a per-queue offload is enabled in rte_eth_dev_configure( ),
> > > > > +	 * it is also enabled on all queues and can't be disabled here.
> > > > > +	 * If it is diabled in rte_eth_dev_configure( ), it can be enabled
> > > > > +	 * or disabled here.
> > > > > +	 * If a per-port offload is enabled in rte_eth_dev_configure( ),
> > > > > +	 * it is also enabled for all queues here.
> > > > > +	 */
> > > > > +	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
> > > >
> > > > I didn't get this one, why add rxmode.offloads into queue offloads?
> > > >
> > > > Based on above change Thomas has an suggestion [1]:
> > > >
> > > > "
> > > > In the case of offload already enabled at port level and repeated
> > > > in queue setup, ethdev can avoid passing it to the PMD queue setup
> > function.
> > > > "
> > > >
> > > > So almost reverse of what you are doing, strip rxmode.offloads
> > > > from local_conf.offloads for PMDs. What do you think?
> > >
> > > Should we do like below
> > > 	local_conf.offloads |= dev->data->dev_conf.rxmode.offloads;
> > > 	local_conf.offloads &= dev_info.rx_queue_offload_capa
> > >
> > > I thinks it's better to only strip port offloads. But keep all queue
> > > offload,  since this is exact we going to configure the queue and
> > > during device start, it can simply iterate on each bit on
> > > local_conf.offloads
> > to turn on queue offload and don't need to worry about rxmode.offloads.
> >
> > No
> > The offloads which are already enabled at port level does not need to
> > be enabled again at queue level.
> > But the PMD can decide to not configure the offload at port level for
> > real, and configure the port offloads in every queue setups.
> > It is an implementation choice, and can be different per-offload.
> 
> OK, got your point, that make sense.
> 
> > So it is simpler to filter such request for queue setups.
> > This way, we will be sure that all offloads, requested in queue setup
> > PMD function, must be setup for the queue.
> > The PMD implementation will need to setup all the requested offloads
> > in queue setup, plus the port offloads which were deferred to all queues.
> >
> > Hope it's clear.
> >
> >

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v5] ethdev: check Rx/Tx offloads
  2018-04-26 14:37     ` [dpdk-dev] [PATCH v5] " Wei Dai
@ 2018-04-26 15:50       ` Ferruh Yigit
  2018-04-26 15:56         ` Thomas Monjalon
  2018-04-26 16:11         ` Ferruh Yigit
  2018-05-03  1:30       ` [dpdk-dev] [PATCH v6] " Wei Dai
  1 sibling, 2 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-04-26 15:50 UTC (permalink / raw)
  To: Wei Dai, thomas, qi.z.zhang; +Cc: dev

On 4/26/2018 3:37 PM, Wei Dai wrote:
> This patch check if a requested offloading is supported
> in the device capability.
> Any offloading is disabled by default if it is not set
> in rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
> A per port offloading can only be enabled in
> rte_eth_dev_configure(). If a per port offloading is
> sent to rte_eth_[rt]x_queue_setup( ), return error.
> Only per queue offloading can be sent to
> rte_eth_[rt]x_queue_setup( ). A per queue offloading is
> enabled only if it is enabled in rte_eth_dev_configure( ) OR
> if it is enabled in rte_eth_[rt]x_queue_setup( ).
> If a per queue offloading is enabled in rte_eth_dev_configure(),
> it can't be disabled in rte_eth_[rt]x_queue_setup( ).
> If a per queue offloading is disabled in rte_eth_dev_configure( ),
> it can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).
> 
> This patch can make such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> ---
>  lib/librte_ether/rte_ethdev.c | 56 +++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 56 insertions(+)
> 
> diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
> index f0f53d4..5485f47 100644
> --- a/lib/librte_ether/rte_ethdev.c
> +++ b/lib/librte_ether/rte_ethdev.c
> @@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>  							ETHER_MAX_LEN;
>  	}
>  
> +	/* Any requested offload must be within its device capability */
> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> +	     local_conf.rxmode.offloads) {
> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
> +				    "0x%" PRIx64 " doesn't match Rx offloads "
> +				    "capability 0x%" PRIx64 "\n",
> +				    port_id,
> +				    local_conf.rxmode.offloads,
> +				    dev_info.rx_offload_capa);
> +		return -EINVAL;
> +	}
> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> +	     local_conf.txmode.offloads) {
> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
> +				    "0x%" PRIx64 " doesn't match Tx offloads "
> +				    "capability 0x%" PRIx64 "\n",
> +				    port_id,
> +				    local_conf.txmode.offloads,
> +				    dev_info.tx_offload_capa);
> +		return -EINVAL;
> +	}
> +
>  	/*
>  	 * Setup new number of RX/TX queues and reconfigure device.
>  	 */
> @@ -1547,6 +1569,23 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>  						    &local_conf.offloads);
>  	}
>  
> +	/*
> +	 * Only per-queue offload can be enabled from application.
> +	 * If any pure per-port offload is sent to this function, return -EINVAL

This comment doesn't match below check, below doesn't check pure per-port offload

> +	 */
> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> +	     local_conf.offloads) {> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
> +				    "Requested offload 0x%" PRIx64 "doesn't "
> +				    "match per-queue capability 0x%" PRIx64
> +				    " in rte_eth_rx_queue_setup( )\n",
> +				    port_id,
> +				    rx_queue_id,
> +				    local_conf.offloads,
> +				    dev_info.rx_queue_offload_capa);
> +		return -EINVAL;
> +	}

Lets add one more check and consider this for rc1, later we can update checks
for suggested API in rc2.

Check to add (this is for existing API)
If a pure per-port offload is enabled in configure(), it should be enabled
in queue_setup() as well.


> +
>  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
>  					      socket_id, &local_conf, mp);
>  	if (!ret) {
> @@ -1681,6 +1720,23 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
>  					  &local_conf.offloads);
>  	}
>  
> +	/*
> +	 * Only per-queue offload can be enabled from applcation.
> +	 * If any pure per-port offload is sent to this function, return -EINVAL
> +	 */
> +	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
> +	     local_conf.offloads) {
> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
> +				    "Requested offload 0x%" PRIx64 "doesn't "
> +				    "match per-queue capability 0x%" PRIx64
> +				    " in rte_eth_tx_queue_setup( )\n",
> +				    port_id,
> +				    tx_queue_id,
> +				    local_conf.offloads,
> +				    dev_info.tx_queue_offload_capa);
> +		return -EINVAL;
> +	}
> +
>  	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
>  		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
>  }
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v5] ethdev: check Rx/Tx offloads
  2018-04-26 15:50       ` Ferruh Yigit
@ 2018-04-26 15:56         ` Thomas Monjalon
  2018-04-26 15:59           ` Ferruh Yigit
  2018-04-26 16:11         ` Ferruh Yigit
  1 sibling, 1 reply; 60+ messages in thread
From: Thomas Monjalon @ 2018-04-26 15:56 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Wei Dai, qi.z.zhang, dev

26/04/2018 17:50, Ferruh Yigit:
> Lets add one more check and consider this for rc1, later we can update checks
> for suggested API in rc2.

I don't think checks are required for RC1.
Can we take time to properly settle it in RC2?

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v5] ethdev: check Rx/Tx offloads
  2018-04-26 15:56         ` Thomas Monjalon
@ 2018-04-26 15:59           ` Ferruh Yigit
  0 siblings, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-04-26 15:59 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: Wei Dai, qi.z.zhang, dev

On 4/26/2018 4:56 PM, Thomas Monjalon wrote:
> 26/04/2018 17:50, Ferruh Yigit:
>> Lets add one more check and consider this for rc1, later we can update checks
>> for suggested API in rc2.
> 
> I don't think checks are required for RC1.
> Can we take time to properly settle it in RC2?

That is OK, I am always having a hesitation about the target of this patch, if
it is trying to implement existing one or suggested one, that is the reason.

Let me update my comment for suggested API change.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v5] ethdev: check Rx/Tx offloads
  2018-04-26 15:50       ` Ferruh Yigit
  2018-04-26 15:56         ` Thomas Monjalon
@ 2018-04-26 16:11         ` Ferruh Yigit
  1 sibling, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-04-26 16:11 UTC (permalink / raw)
  To: Wei Dai, thomas, qi.z.zhang; +Cc: dev

On 4/26/2018 4:50 PM, Ferruh Yigit wrote:
> On 4/26/2018 3:37 PM, Wei Dai wrote:
>> This patch check if a requested offloading is supported
>> in the device capability.
>> Any offloading is disabled by default if it is not set
>> in rte_eth_dev_configure( ) and rte_eth_[rt]x_queue_setup().
>> A per port offloading can only be enabled in
>> rte_eth_dev_configure(). If a per port offloading is
>> sent to rte_eth_[rt]x_queue_setup( ), return error.
>> Only per queue offloading can be sent to
>> rte_eth_[rt]x_queue_setup( ). A per queue offloading is
>> enabled only if it is enabled in rte_eth_dev_configure( ) OR
>> if it is enabled in rte_eth_[rt]x_queue_setup( ).
>> If a per queue offloading is enabled in rte_eth_dev_configure(),
>> it can't be disabled in rte_eth_[rt]x_queue_setup( ).
>> If a per queue offloading is disabled in rte_eth_dev_configure( ),
>> it can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).
>>
>> This patch can make such checking in a common way in rte_ethdev
>> layer to avoid same checking in underlying PMD.
>>
>> Signed-off-by: Wei Dai <wei.dai@intel.com>
>> ---
>>  lib/librte_ether/rte_ethdev.c | 56 +++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 56 insertions(+)
>>
>> diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
>> index f0f53d4..5485f47 100644
>> --- a/lib/librte_ether/rte_ethdev.c
>> +++ b/lib/librte_ether/rte_ethdev.c
>> @@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>>  							ETHER_MAX_LEN;
>>  	}
>>  
>> +	/* Any requested offload must be within its device capability */
>> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
>> +	     local_conf.rxmode.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
>> +				    "0x%" PRIx64 " doesn't match Rx offloads "
>> +				    "capability 0x%" PRIx64 "\n",
>> +				    port_id,
>> +				    local_conf.rxmode.offloads,
>> +				    dev_info.rx_offload_capa);
>> +		return -EINVAL;
>> +	}
>> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
>> +	     local_conf.txmode.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
>> +				    "0x%" PRIx64 " doesn't match Tx offloads "
>> +				    "capability 0x%" PRIx64 "\n",
>> +				    port_id,
>> +				    local_conf.txmode.offloads,
>> +				    dev_info.tx_offload_capa);
>> +		return -EINVAL;
>> +	}
>> +
>>  	/*
>>  	 * Setup new number of RX/TX queues and reconfigure device.
>>  	 */
>> @@ -1547,6 +1569,23 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>>  						    &local_conf.offloads);
>>  	}
>>  
>> +	/*
>> +	 * Only per-queue offload can be enabled from application.
>> +	 * If any pure per-port offload is sent to this function, return -EINVAL
> 
> This comment doesn't match below check, below doesn't check pure per-port offload
> 
>> +	 */
>> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
>> +	     local_conf.offloads) {> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
>> +				    "Requested offload 0x%" PRIx64 "doesn't "
>> +				    "match per-queue capability 0x%" PRIx64
>> +				    " in rte_eth_rx_queue_setup( )\n",
>> +				    port_id,
>> +				    rx_queue_id,
>> +				    local_conf.offloads,
>> +				    dev_info.rx_queue_offload_capa);
>> +		return -EINVAL;
>> +	}
> 
> Lets add one more check and consider this for rc1, later we can update checks
> for suggested API in rc2.
> 
> Check to add (this is for existing API)
> If a pure per-port update enabled configure, it should be enabled in
> queue_setup() as well.

According comment from Thomas, lets target directly rc2, new API. For that I
think we still need an update to this patch.

In a previous version of this patch there was a comment to strip port_offloads
from the requested offloads before passing the values to the PMD.
Just to highlight not strip port_capability, but port_offloads set by configure()

The logic is if an offload set in configure(), both port level offload or queue
level offload, no need to duplicate it for PMD.

> 
> 
>> +
>>  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
>>  					      socket_id, &local_conf, mp);
>>  	if (!ret) {
>> @@ -1681,6 +1720,23 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
>>  					  &local_conf.offloads);
>>  	}
>>  
>> +	/*
>> +	 * Only per-queue offload can be enabled from applcation.
>> +	 * If any pure per-port offload is sent to this function, return -EINVAL
>> +	 */
>> +	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
>> +	     local_conf.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
>> +				    "Requested offload 0x%" PRIx64 "doesn't "
>> +				    "match per-queue capability 0x%" PRIx64
>> +				    " in rte_eth_tx_queue_setup( )\n",
>> +				    port_id,
>> +				    tx_queue_id,
>> +				    local_conf.offloads,
>> +				    dev_info.tx_queue_offload_capa);
>> +		return -EINVAL;
>> +	}
>> +
>>  	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
>>  		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
>>  }
>>
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v6] ethdev: check Rx/Tx offloads
  2018-04-26 14:37     ` [dpdk-dev] [PATCH v5] " Wei Dai
  2018-04-26 15:50       ` Ferruh Yigit
@ 2018-05-03  1:30       ` Wei Dai
  2018-05-04 11:12         ` Ferruh Yigit
  2018-05-04 14:02         ` [dpdk-dev] [PATCH v7] " Wei Dai
  1 sibling, 2 replies; 60+ messages in thread
From: Wei Dai @ 2018-05-03  1:30 UTC (permalink / raw)
  To: thomas, ferruh.yigit, qi.z.zhang; +Cc: dev, Wei Dai

This patch checks if an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
>From application, a pure per-port offloading can only be enabled in
rte_eth_dev_configure().
Only supported per queue offloading can be sent to
rte_eth_[rt]x_queue_setup( ). A per queue offloading is
enabled only if it is enabled in rte_eth_dev_configure( ) OR
if it is enabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is enabled in rte_eth_dev_configure(),
it can't be disabled in rte_eth_[rt]x_queue_setup( ).
If a per queue offloading is disabled in rte_eth_dev_configure( ),
it can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).

This patch makes all of the above checks in a common way in the rte_ethdev
layer to avoid duplicating the same checks in the underlying PMDs.

Signed-off-by: Wei Dai <wei.dai@intel.com>

---
v6:
No need enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to the decision on the offloading API in the community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 lib/librte_ether/rte_ethdev.c | 70 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index f0f53d4..39a0f0e 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1196,6 +1196,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offload must be within its device capability */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capability 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/*
 	 * Setup new number of RX/TX queues and reconfigure device.
 	 */
@@ -1547,6 +1569,30 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from application.
+	 * If any other offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 "doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in %s\n",
+				    port_id,
+				    rx_queue_id,
+				    local_conf.offloads,
+				    dev_info.rx_queue_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * If an offload has already been enabled in rte_eth_dev_configure(),
+	 * there is no need to enable it again in queue level.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1681,6 +1727,30 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * Only per-queue offload can be enabled from applcation.
+	 * If any other offload is sent to this function, return -EINVAL
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 "doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in %s\n",
+				    port_id,
+				    tx_queue_id,
+				    local_conf.offloads,
+				    dev_info.tx_queue_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * If an offload has already be enabled in rte_eth_dev_configure,
+	 * there is no need to enable it in queue level again
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v6] ethdev: check Rx/Tx offloads
  2018-05-03  1:30       ` [dpdk-dev] [PATCH v6] " Wei Dai
@ 2018-05-04 11:12         ` Ferruh Yigit
  2018-05-04 14:02         ` [dpdk-dev] [PATCH v7] " Wei Dai
  1 sibling, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-04 11:12 UTC (permalink / raw)
  To: Wei Dai, thomas, qi.z.zhang; +Cc: dev

On 5/3/2018 2:30 AM, Wei Dai wrote:
> This patch check if a input requested offloading is valid or not.
> Any reuqested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
> From application, a pure per-port offloading can only be enabled in
> rte_eth_dev_configure().
> Only supported per queue offloading can be sent to
> rte_eth_[rt]x_queue_setup( ). A per queue offloading is
> enabled only if it is enabled in rte_eth_dev_configure( ) OR
> if it is enabled in rte_eth_[rt]x_queue_setup( ).
> If a per queue offloading is enabled in rte_eth_dev_configure(),
> it can't be disabled in rte_eth_[rt]x_queue_setup( ).
> If a per queue offloading is disabled in rte_eth_dev_configure( ),
> it can be enabled or disabled( ) in rte_eth_[rt]x_queue_setup( ).
> 
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.

Hi Wei, Thomas,

There are a few comments below but there is another concern, this change will
break existing checks in PMDs.
Perhaps this check, PMD updates and update to document API change should go in
one patch, what do you think?

> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> 
> ---
> v6:
> No need enable an offload in queue_setup( ) if it has already
> been enabled in dev_configure( )
> 
> v5:
> keep offload settings sent to PMD same as those from application
> 
> v4:
> fix a wrong description in git log message.
> 
> v3:
> rework according to dicision of offloading API in community
> 
> v2:
> add offloads checking in rte_eth_dev_configure( ).
> check if a requested offloading is supported.

<...>

> @@ -1547,6 +1569,30 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>  						    &local_conf.offloads);
>  	}
>  
> +	/*
> +	 * Only per-queue offload can be enabled from application.
> +	 * If any other offload is sent to this function, return -EINVAL
> +	 */
> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> +	     local_conf.offloads) {
> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
> +				    "Requested offload 0x%" PRIx64 "doesn't "
> +				    "match per-queue capability 0x%" PRIx64
> +				    " in %s\n",
> +				    port_id,
> +				    rx_queue_id,
> +				    local_conf.offloads,
> +				    dev_info.rx_queue_offload_capa,
> +				    __func__);
> +		return -EINVAL;
> +	}

Application will be allowed to provide queue or port offload in setup, only
error case should be if application request a port level offload that is not
already enabled in configure()

Also similar check in configure() required, to be sure app is not requesting
offload beyond device capability (port + queue)

> +
> +	/*
> +	 * If an offload has already been enabled in rte_eth_dev_configure(),
> +	 * there is no need to enable it again in queue level.
> +	 */
> +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

This is OK, with above check PMD will only observe new queue offload request.

>  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
>  					      socket_id, &local_conf, mp);
>  	if (!ret) {
> @@ -1681,6 +1727,30 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
>  					  &local_conf.offloads);
>  	}
>  
> +	/*
> +	 * Only per-queue offload can be enabled from applcation.
> +	 * If any other offload is sent to this function, return -EINVAL
> +	 */
> +	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
> +	     local_conf.offloads) {
> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
> +				    "Requested offload 0x%" PRIx64 "doesn't "
> +				    "match per-queue capability 0x%" PRIx64
> +				    " in %s\n",
> +				    port_id,
> +				    tx_queue_id,
> +				    local_conf.offloads,
> +				    dev_info.tx_queue_offload_capa,
> +				    __func__);
> +		return -EINVAL;
> +	}
> +
> +	/*
> +	 * If an offload has already be enabled in rte_eth_dev_configure,
> +	 * there is no need to enable it in queue level again
> +	 */
> +	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
> +
>  	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
>  		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
>  }
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
  2018-05-03  1:30       ` [dpdk-dev] [PATCH v6] " Wei Dai
  2018-05-04 11:12         ` Ferruh Yigit
@ 2018-05-04 14:02         ` Wei Dai
  2018-05-04 14:42           ` Ferruh Yigit
                             ` (3 more replies)
  1 sibling, 4 replies; 60+ messages in thread
From: Wei Dai @ 2018-05-04 14:02 UTC (permalink / raw)
  To: thomas, ferruh.yigit; +Cc: dev, Wei Dai

This patch checks if an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
>From application, a pure per-port offloading can't be enabled on
any queue if it hasn't been enabled in rte_eth_dev_configure( ).
If any offloading is enabled in rte_eth_dev_configure( ) by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
The underlying PMD must be aware that the requested offloadings
to PMD specific queue_setup( ) function only carries those
offloadings only enabled for the queue but not enabled in
rte_eth_dev_configure( ) and they are certain per-queue type.

This patch can make above such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>

---
v7:
Give the maximum freedom for upper application,
only minimal checking is performed in ethdev layer.
Only requested specific pure per-queue offloadings are input
to underlying PMD.

v6:
No need enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to decision of offloading API in community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 lib/librte_ethdev/rte_ethdev.c | 150 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 150 insertions(+)

diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index e560524..0ad05eb 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offloading must be within its device capabilities */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capabilities 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capabilities 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/* Check that device supports requested rss hash functions. */
 	if ((dev_info.flow_type_rss_offloads |
 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
@@ -1414,6 +1436,8 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_rxconf local_conf;
 	void **rxq;
+	uint64_t pure_port_offload_capa;
+	uint64_t only_enabled_for_queue;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1504,6 +1528,68 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * The requested offloadings by application for this queue
+	 * can be per-queue type or per-port type. and
+	 * they must be within the device offloading capabilities.
+	 */
+	if ((local_conf.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 "doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in %s\n",
+				    port_id,
+				    rx_queue_id,
+				    local_conf.offloads,
+				    dev_info.rx_queue_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * A pure per-port offloading can't be enabled for any queue
+	 * if it hasn't been enabled in rte_eth_dev_configure( ).
+	 *
+	 * Following pure_port_offload_capa is the capabilities which
+	 * can't be enabled on some queue while disabled on other queue.
+	 * pure_port_offload_capa must be enabled or disabled on all
+	 * queues at same time.
+	 *
+	 * Following only_enabled_for_queue is the offloadings which
+	 * are enabled for this queue but hasn't been enabled in
+	 * rte_eth_dev_configure( ).
+	 */
+	pure_port_offload_capa = dev_info.rx_offload_capa ^
+				 dev_info.rx_queue_offload_capa;
+	only_enabled_for_queue = (local_conf.offloads ^
+		dev->data->dev_conf.rxmode.offloads) & local_conf.offloads;
+	if (only_enabled_for_queue & pure_port_offload_capa) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d, only "
+				    "enabled offload 0x%" PRIx64 "for this "
+				    "queue haven't been enabled in "
+				    "dev_configure( ), they are within "
+				    "pure per-port capabilities 0x%" PRIx64
+				    " in %s\n",
+				    port_id,
+				    rx_queue_id,
+				    only_enabled_for_queue,
+				    pure_port_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1549,6 +1635,8 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_txconf local_conf;
 	void **txq;
+	uint64_t pure_port_offload_capa;
+	uint64_t only_enabled_for_queue;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1612,6 +1700,68 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * The requested offloadings by application for this queue
+	 * can be per-queue type or per-port type. and
+	 * they must be within the device offloading capabilities.
+	 */
+	if ((local_conf.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d "
+				    "Requested offload 0x%" PRIx64 "doesn't "
+				    "match per-queue capability 0x%" PRIx64
+				    " in %s\n",
+				    port_id,
+				    tx_queue_id,
+				    local_conf.offloads,
+				    dev_info.tx_queue_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * A pure per-port offloading can't be enabled for any queue
+	 * if it hasn't been enabled in rte_eth_dev_configure( ).
+	 *
+	 * Following pure_port_offload_capa is the capabilities which
+	 * can't be enabled on some queue while disabled on other queue.
+	 * pure_port_offload_capa must be enabled or disabled on all
+	 * queues at same time.
+	 *
+	 * Following only_enabled_for_queue is the offloadings which
+	 * are enabled for this queue but hasn't been enabled in
+	 * rte_eth_dev_configure( ).
+	 */
+	pure_port_offload_capa = dev_info.tx_offload_capa ^
+				 dev_info.tx_queue_offload_capa;
+	only_enabled_for_queue = (local_conf.offloads ^
+		dev->data->dev_conf.txmode.offloads) & local_conf.offloads;
+	if (only_enabled_for_queue & pure_port_offload_capa) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d, only "
+				    "enabled offload 0x%" PRIx64 "for this "
+				    "queue haven't been enabled in "
+				    "dev_configure( ), they are within "
+				    "pure per-port capabilities 0x%" PRIx64
+				    " in %s\n",
+				    port_id,
+				    tx_queue_id,
+				    only_enabled_for_queue,
+				    pure_port_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
  2018-05-04 14:02         ` [dpdk-dev] [PATCH v7] " Wei Dai
@ 2018-05-04 14:42           ` Ferruh Yigit
  2018-05-04 14:45             ` Ferruh Yigit
  2018-05-05 18:59           ` Shahaf Shuler
                             ` (2 subsequent siblings)
  3 siblings, 1 reply; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-04 14:42 UTC (permalink / raw)
  To: Wei Dai, thomas; +Cc: dev

On 5/4/2018 3:02 PM, Wei Dai wrote:
> This patch check if a input requested offloading is valid or not.
> Any reuqested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> From application, a pure per-port offloading can't be enabled on
> any queue if it hasn't been enabled in rte_eth_dev_configure( ).
> If any offloading is enabled in rte_eth_dev_configure( ) by application,
> it is enabled on all queues no matter whether it is per-queue or
> per-port type and no matter whether it is set or cleared in
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> The underlying PMD must be aware that the requested offloadings
> to PMD specific queue_setup( ) function only carries those
> offloadings only enabled for the queue but not enabled in
> rte_eth_dev_configure( ) and they are certain per-queue type.
> 
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>

Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>


As mentioned in prev version, getting only this patch breaks the applications
because of existing checks in the PMDs.

Hi Wei,

If you have bandwidth, can you update PMDs to remove their existing offload
checks in this patch?


PMDs needs to be updated for:
1- Remove existing offload verify checks
2- Update offload configure logic based on new values

(1) can be part of this patch. But PMD maintainers should send update for (2) if
a change required.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
  2018-05-04 14:42           ` Ferruh Yigit
@ 2018-05-04 14:45             ` Ferruh Yigit
  0 siblings, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-04 14:45 UTC (permalink / raw)
  To: Wei Dai, thomas; +Cc: dev, Shahaf Shuler

On 5/4/2018 3:42 PM, Ferruh Yigit wrote:
> On 5/4/2018 3:02 PM, Wei Dai wrote:
>> This patch check if a input requested offloading is valid or not.
>> Any reuqested offloading must be supported in the device capabilities.
>> Any offloading is disabled by default if it is not set in the parameter
>> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
>> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
>> From application, a pure per-port offloading can't be enabled on
>> any queue if it hasn't been enabled in rte_eth_dev_configure( ).
>> If any offloading is enabled in rte_eth_dev_configure( ) by application,
>> it is enabled on all queues no matter whether it is per-queue or
>> per-port type and no matter whether it is set or cleared in
>> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
>> The underlying PMD must be aware that the requested offloadings
>> to PMD specific queue_setup( ) function only carries those
>> offloadings only enabled for the queue but not enabled in
>> rte_eth_dev_configure( ) and they are certain per-queue type.
>>
>> This patch can make above such checking in a common way in rte_ethdev
>> layer to avoid same checking in underlying PMD.
>>
>> Signed-off-by: Wei Dai <wei.dai@intel.com>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> 
> Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
> 
> 
> As mentioned in prev version, getting only this patch breaks the applications
> because of existing checks in the PMDs.
> 
> Hi Wei,
> 
> If you have bandwidth, can you update PMDs to remove their existing offload
> checks in this patch?
> 
> 
> PMDs needs to be updated for:
> 1- Remove existing offload verify checks
> 2- Update offload configure logic based on new values
> 
> (1) can be part of this patch. But PMD maintainers should send update for (2) if
> a change required.

cc'ed Shahaf, specially for (2) one.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
  2018-05-04 14:02         ` [dpdk-dev] [PATCH v7] " Wei Dai
  2018-05-04 14:42           ` Ferruh Yigit
@ 2018-05-05 18:59           ` Shahaf Shuler
  2018-05-07  7:15             ` Dai, Wei
  2018-05-08 10:58             ` Ferruh Yigit
  2018-05-08 10:05           ` [dpdk-dev] [PATCH v8] " Wei Dai
  2018-05-08 10:10           ` [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads Wei Dai
  3 siblings, 2 replies; 60+ messages in thread
From: Shahaf Shuler @ 2018-05-05 18:59 UTC (permalink / raw)
  To: Wei Dai, Thomas Monjalon, ferruh.yigit; +Cc: dev

Hi Ferruh, Dai,
> Subject: [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
> 
> This patch check if a input requested offloading is valid or not.
> Any reuqested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and [rt]x_conf-
> >offloads to rte_eth_[rt]x_queue_setup( ).
> From application, a pure per-port offloading can't be enabled on any queue if
> it hasn't been enabled in rte_eth_dev_configure( ).
> If any offloading is enabled in rte_eth_dev_configure( ) by application, it is
> enabled on all queues no matter whether it is per-queue or per-port type
> and no matter whether it is set or cleared in [rt]x_conf->offloads to
> rte_eth_[rt]x_queue_setup( ).
> The underlying PMD must be aware that the requested offloadings to PMD
> specific queue_setup( ) function only carries those offloadings only enabled
> for the queue but not enabled in rte_eth_dev_configure( ) and they are
> certain per-queue type.
> 
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> 
> ---
> v7:
> Give the maximum freedom for upper application, only minimal checking is
> performed in ethdev layer.
> Only requested specific pure per-queue offloadings are input to underlying
> PMD.
> 
> v6:
> No need enable an offload in queue_setup( ) if it has already been enabled
> in dev_configure( )
> 
> v5:
> keep offload settings sent to PMD same as those from application
> 
> v4:
> fix a wrong description in git log message.
> 
> v3:
> rework according to dicision of offloading API in community
> 
> v2:
> add offloads checking in rte_eth_dev_configure( ).
> check if a requested offloading is supported.
> ---
>  lib/librte_ethdev/rte_ethdev.c | 150
> +++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 150 insertions(+)
> 
> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
> index e560524..0ad05eb 100644
> --- a/lib/librte_ethdev/rte_ethdev.c
> +++ b/lib/librte_ethdev/rte_ethdev.c
> @@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t
> nb_rx_q, uint16_t nb_tx_q,
>  							ETHER_MAX_LEN;
>  	}
> 
> +	/* Any requested offloading must be within its device capabilities */
> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> +	     local_conf.rxmode.offloads) {
> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx
> offloads "
> +				    "0x%" PRIx64 " doesn't match Rx offloads "
> +				    "capabilities 0x%" PRIx64 "\n",
> +				    port_id,
> +				    local_conf.rxmode.offloads,
> +				    dev_info.rx_offload_capa);
> +		return -EINVAL;

While I am OK with such behavior, we should be more careful not to get into the same issue as in [1].
There are PMD which don't report the capabilities correctly however do expect to have the offload configured.

All I am saying it is worth a check and cautious decision if it is right to include this one w/o prior application notice and at such late RC of the release. 

> +	}
> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> +	     local_conf.txmode.offloads) {
> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx
> offloads "
> +				    "0x%" PRIx64 " doesn't match Tx offloads "
> +				    "capabilities 0x%" PRIx64 "\n",
> +				    port_id,
> +				    local_conf.txmode.offloads,
> +				    dev_info.tx_offload_capa);
> +		return -EINVAL;
> +	}
> +
>  	/* Check that device supports requested rss hash functions. */
>  	if ((dev_info.flow_type_rss_offloads |
>  	     dev_conf->rx_adv_conf.rss_conf.rss_hf) != @@ -1414,6 +1436,8
> @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>  	struct rte_eth_dev_info dev_info;
>  	struct rte_eth_rxconf local_conf;
>  	void **rxq;
> +	uint64_t pure_port_offload_capa;
> +	uint64_t only_enabled_for_queue;
> 
>  	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
> 
> @@ -1504,6 +1528,68 @@ rte_eth_rx_queue_setup(uint16_t port_id,
> uint16_t rx_queue_id,
>  						    &local_conf.offloads);
>  	}
> 
> +	/*
> +	 * The requested offloadings by application for this queue
> +	 * can be per-queue type or per-port type. and
> +	 * they must be within the device offloading capabilities.
> +	 */
> +	if ((local_conf.offloads & dev_info.rx_offload_capa) !=
> +	     local_conf.offloads) {
> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> rx_queue_id=%d "
> +				    "Requested offload 0x%" PRIx64 "doesn't "
> +				    "match per-queue capability 0x%" PRIx64
> +				    " in %s\n",
> +				    port_id,
> +				    rx_queue_id,
> +				    local_conf.offloads,
> +				    dev_info.rx_queue_offload_capa,
> +				    __func__);
> +		return -EINVAL;
> +	}
> +
> +	/*
> +	 * A pure per-port offloading can't be enabled for any queue
> +	 * if it hasn't been enabled in rte_eth_dev_configure( ).
> +	 *
> +	 * Following pure_port_offload_capa is the capabilities which
> +	 * can't be enabled on some queue while disabled on other queue.
> +	 * pure_port_offload_capa must be enabled or disabled on all
> +	 * queues at same time.
> +	 *
> +	 * Following only_enabled_for_queue is the offloadings which
> +	 * are enabled for this queue but hasn't been enabled in
> +	 * rte_eth_dev_configure( ).
> +	 */
> +	pure_port_offload_capa = dev_info.rx_offload_capa ^
> +				 dev_info.rx_queue_offload_capa;
> +	only_enabled_for_queue = (local_conf.offloads ^
> +		dev->data->dev_conf.rxmode.offloads) &
> local_conf.offloads;

It looks like above logic could be a lot simpler. 

How about:
local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; // keep only the added offloads on top of the port ones
if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
    local_conf.offloads) { //check if added offloads are part of the queue offload capa
	ERROR...


> +	if (only_enabled_for_queue & pure_port_offload_capa) {
> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> rx_queue_id=%d, only "
> +				    "enabled offload 0x%" PRIx64 "for this "
> +				    "queue haven't been enabled in "
> +				    "dev_configure( ), they are within "
> +				    "pure per-port capabilities 0x%" PRIx64

Need to re-work this error message. The user doesn't know what are "pure per-port capabilities" 

> +				    " in %s\n",
> +				    port_id,
> +				    rx_queue_id,
> +				    only_enabled_for_queue,
> +				    pure_port_offload_capa,
> +				    __func__);
> +		return -EINVAL;
> +	}
> +
> +	/*
> +	 * If an offloading has already been enabled in
> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> +	 * so there is no need to enable it in this queue again.
> +	 * The local_conf.offloads input to underlying PMD only carries
> +	 * those offloadings which are only enabled on this queue and
> +	 * not enabled on all queues.
> +	 * The underlying PMD must be aware of this point.
> +	 */
> +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
> +
>  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id,
> nb_rx_desc,
>  					      socket_id, &local_conf, mp);
>  	if (!ret) {
> @@ -1549,6 +1635,8 @@ rte_eth_tx_queue_setup(uint16_t port_id,
> uint16_t tx_queue_id,
>  	struct rte_eth_dev_info dev_info;
>  	struct rte_eth_txconf local_conf;
>  	void **txq;
> +	uint64_t pure_port_offload_capa;
> +	uint64_t only_enabled_for_queue;
> 
>  	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
> 
> @@ -1612,6 +1700,68 @@ rte_eth_tx_queue_setup(uint16_t port_id,
> uint16_t tx_queue_id,
>  					  &local_conf.offloads);
>  	}
> 
> +	/*
> +	 * The requested offloadings by application for this queue
> +	 * can be per-queue type or per-port type. and
> +	 * they must be within the device offloading capabilities.
> +	 */
> +	if ((local_conf.offloads & dev_info.tx_offload_capa) !=
> +	     local_conf.offloads) {
> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> tx_queue_id=%d "
> +				    "Requested offload 0x%" PRIx64 "doesn't "
> +				    "match per-queue capability 0x%" PRIx64
> +				    " in %s\n",
> +				    port_id,
> +				    tx_queue_id,
> +				    local_conf.offloads,
> +				    dev_info.tx_queue_offload_capa,
> +				    __func__);
> +		return -EINVAL;
> +	}
> +
> +	/*
> +	 * A pure per-port offloading can't be enabled for any queue
> +	 * if it hasn't been enabled in rte_eth_dev_configure( ).
> +	 *
> +	 * Following pure_port_offload_capa is the capabilities which
> +	 * can't be enabled on some queue while disabled on other queue.
> +	 * pure_port_offload_capa must be enabled or disabled on all
> +	 * queues at same time.
> +	 *
> +	 * Following only_enabled_for_queue is the offloadings which
> +	 * are enabled for this queue but hasn't been enabled in
> +	 * rte_eth_dev_configure( ).
> +	 */
> +	pure_port_offload_capa = dev_info.tx_offload_capa ^
> +				 dev_info.tx_queue_offload_capa;
> +	only_enabled_for_queue = (local_conf.offloads ^
> +		dev->data->dev_conf.txmode.offloads) &
> local_conf.offloads;

Same comments as in the Rx part.  

> +	if (only_enabled_for_queue & pure_port_offload_capa) {
> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> tx_queue_id=%d, only "
> +				    "enabled offload 0x%" PRIx64 "for this "
> +				    "queue haven't been enabled in "
> +				    "dev_configure( ), they are within "
> +				    "pure per-port capabilities 0x%" PRIx64
> +				    " in %s\n",
> +				    port_id,
> +				    tx_queue_id,
> +				    only_enabled_for_queue,
> +				    pure_port_offload_capa,
> +				    __func__);
> +		return -EINVAL;
> +	}
> +
> +	/*
> +	 * If an offloading has already been enabled in
> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> +	 * so there is no need to enable it in this queue again.
> +	 * The local_conf.offloads input to underlying PMD only carries
> +	 * those offloadings which are only enabled on this queue and
> +	 * not enabled on all queues.
> +	 * The underlying PMD must be aware of this point.
> +	 */
> +	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
> +
>  	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
>  		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));  }
> --
> 2.7.5


As for Ferruh's comment
> 
> PMDs needs to be updated for:
> 1- Remove existing offload verify checks
> 2- Update offload configure logic based on new values
> 
> (1) can be part of this patch. But PMD maintainers should send update 
> for (2) if a change required.
>
>cc'ed Shahaf, specially for (2) one.

I think PMD maintainers can help with that. If it will be integrated enough time before the release Mellanox PMDs can be converted by us. 




[1]
http://dpdk.org/dev/patchwork/patch/38645/

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
  2018-05-05 18:59           ` Shahaf Shuler
@ 2018-05-07  7:15             ` Dai, Wei
  2018-05-08 10:58             ` Ferruh Yigit
  1 sibling, 0 replies; 60+ messages in thread
From: Dai, Wei @ 2018-05-07  7:15 UTC (permalink / raw)
  To: Shahaf Shuler, Thomas Monjalon, Yigit, Ferruh; +Cc: dev

Thanks to Shuler and Ferruh for your feedback and guidance.

PMD at least has these 2 options with this patch:
a). If PMD doesn't want to make much more changes, it still can do "[rt]x_conf->offloads |= dev->data->dev_conf.rxmode.offloads;"
   in the beginning of its specific queue_setup( ) and just remove offload checking (although the checking always pass now) and all
   others keep same. In this way, PMDs still comply with the offload APIs defined in 17.11.
b). PMD also can use the info that only new added queue-level offloads in the input argument [rt]x_conf->offloads to make some
   optimization or other code changes. It may be more efficient than a).

As Ferruh said, applying only this patch without the relevant changes in PMDs will break applications,
so I will submit a v8 patch which will include this ethdev patch together with code changes in PMDs following option a) above and a documentation update.
I'd like to include all these changes in a single patch to avoid application failures if some patches are applied and some are not.
PMD maintainers can go on with option b) later.

Shuler's suggestion to simplify the new added offloads in queue_setup( ) is better.
I will adopt it in my v8 patch.

> -----Original Message-----
> From: Shahaf Shuler [mailto:shahafs@mellanox.com]
> Sent: Sunday, May 6, 2018 3:00 AM
> To: Dai, Wei <wei.dai@intel.com>; Thomas Monjalon
> <thomas@monjalon.net>; Yigit, Ferruh <ferruh.yigit@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
> 
> Hi Ferruh, Dai,
> > Subject: [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
> >
> > This patch check if a input requested offloading is valid or not.
> > Any reuqested offloading must be supported in the device capabilities.
> > Any offloading is disabled by default if it is not set in the
> > parameter dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> > [rt]x_conf-
> > >offloads to rte_eth_[rt]x_queue_setup( ).
> > From application, a pure per-port offloading can't be enabled on any
> > queue if it hasn't been enabled in rte_eth_dev_configure( ).
> > If any offloading is enabled in rte_eth_dev_configure( ) by
> > application, it is enabled on all queues no matter whether it is
> > per-queue or per-port type and no matter whether it is set or cleared
> > in [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> > The underlying PMD must be aware that the requested offloadings to PMD
> > specific queue_setup( ) function only carries those offloadings only
> > enabled for the queue but not enabled in rte_eth_dev_configure( ) and
> > they are certain per-queue type.
> >
> > This patch can make above such checking in a common way in rte_ethdev
> > layer to avoid same checking in underlying PMD.
> >
> > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> >
> > ---
> > v7:
> > Give the maximum freedom for upper application, only minimal checking
> > is performed in ethdev layer.
> > Only requested specific pure per-queue offloadings are input to
> > underlying PMD.
> >
> > v6:
> > No need enable an offload in queue_setup( ) if it has already been
> > enabled in dev_configure( )
> >
> > v5:
> > keep offload settings sent to PMD same as those from application
> >
> > v4:
> > fix a wrong description in git log message.
> >
> > v3:
> > rework according to dicision of offloading API in community
> >
> > v2:
> > add offloads checking in rte_eth_dev_configure( ).
> > check if a requested offloading is supported.
> > ---
> >  lib/librte_ethdev/rte_ethdev.c | 150
> > +++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 150 insertions(+)
> >
> > diff --git a/lib/librte_ethdev/rte_ethdev.c
> > b/lib/librte_ethdev/rte_ethdev.c index e560524..0ad05eb 100644
> > --- a/lib/librte_ethdev/rte_ethdev.c
> > +++ b/lib/librte_ethdev/rte_ethdev.c
> > @@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id,
> > uint16_t nb_rx_q, uint16_t nb_tx_q,
> >  							ETHER_MAX_LEN;
> >  	}
> >
> > +	/* Any requested offloading must be within its device capabilities */
> > +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> > +	     local_conf.rxmode.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx
> > offloads "
> > +				    "0x%" PRIx64 " doesn't match Rx offloads "
> > +				    "capabilities 0x%" PRIx64 "\n",
> > +				    port_id,
> > +				    local_conf.rxmode.offloads,
> > +				    dev_info.rx_offload_capa);
> > +		return -EINVAL;
> 
> While I am OK with such behavior, we should be more careful not to get into
> the same issue as in [1].
> There are PMD which don't report the capabilities correctly however do
> expect to have the offload configured.
> 
> All I am saying it is worth a check and cautious decision if it is right to include
> this one w/o prior application notice and at such late RC of the release.
> 
> > +	}
> > +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> > +	     local_conf.txmode.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx
> > offloads "
> > +				    "0x%" PRIx64 " doesn't match Tx offloads "
> > +				    "capabilities 0x%" PRIx64 "\n",
> > +				    port_id,
> > +				    local_conf.txmode.offloads,
> > +				    dev_info.tx_offload_capa);
> > +		return -EINVAL;
> > +	}
> > +
> >  	/* Check that device supports requested rss hash functions. */
> >  	if ((dev_info.flow_type_rss_offloads |
> >  	     dev_conf->rx_adv_conf.rss_conf.rss_hf) != @@ -1414,6 +1436,8
> @@
> > rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
> >  	struct rte_eth_dev_info dev_info;
> >  	struct rte_eth_rxconf local_conf;
> >  	void **rxq;
> > +	uint64_t pure_port_offload_capa;
> > +	uint64_t only_enabled_for_queue;
> >
> >  	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
> >
> > @@ -1504,6 +1528,68 @@ rte_eth_rx_queue_setup(uint16_t port_id,
> > uint16_t rx_queue_id,
> >  						    &local_conf.offloads);
> >  	}
> >
> > +	/*
> > +	 * The requested offloadings by application for this queue
> > +	 * can be per-queue type or per-port type. and
> > +	 * they must be within the device offloading capabilities.
> > +	 */
> > +	if ((local_conf.offloads & dev_info.rx_offload_capa) !=
> > +	     local_conf.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> > rx_queue_id=%d "
> > +				    "Requested offload 0x%" PRIx64 "doesn't "
> > +				    "match per-queue capability 0x%" PRIx64
> > +				    " in %s\n",
> > +				    port_id,
> > +				    rx_queue_id,
> > +				    local_conf.offloads,
> > +				    dev_info.rx_queue_offload_capa,
> > +				    __func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/*
> > +	 * A pure per-port offloading can't be enabled for any queue
> > +	 * if it hasn't been enabled in rte_eth_dev_configure( ).
> > +	 *
> > +	 * Following pure_port_offload_capa is the capabilities which
> > +	 * can't be enabled on some queue while disabled on other queue.
> > +	 * pure_port_offload_capa must be enabled or disabled on all
> > +	 * queues at same time.
> > +	 *
> > +	 * Following only_enabled_for_queue is the offloadings which
> > +	 * are enabled for this queue but hasn't been enabled in
> > +	 * rte_eth_dev_configure( ).
> > +	 */
> > +	pure_port_offload_capa = dev_info.rx_offload_capa ^
> > +				 dev_info.rx_queue_offload_capa;
> > +	only_enabled_for_queue = (local_conf.offloads ^
> > +		dev->data->dev_conf.rxmode.offloads) &
> > local_conf.offloads;
> 
> It looks like above logic could be a lot simpler.
> 
> How about:
> local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; // keep
> only the added offloads on top of the port ones if ((local_conf.offloads &
> dev_info.rx_queue_offload_capa) !=
>     local_conf.offloads) { //check if added offloads are part of the queue
> offload capa
> 	ERROR...
> 
> 
> > +	if (only_enabled_for_queue & pure_port_offload_capa) {
> > +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> > rx_queue_id=%d, only "
> > +				    "enabled offload 0x%" PRIx64 "for this "
> > +				    "queue haven't been enabled in "
> > +				    "dev_configure( ), they are within "
> > +				    "pure per-port capabilities 0x%" PRIx64
> 
> Need to re-work this error message. The user doesn't know what are "pure
> per-port capabilities"
> 
> > +				    " in %s\n",
> > +				    port_id,
> > +				    rx_queue_id,
> > +				    only_enabled_for_queue,
> > +				    pure_port_offload_capa,
> > +				    __func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/*
> > +	 * If an offloading has already been enabled in
> > +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> > +	 * so there is no need to enable it in this queue again.
> > +	 * The local_conf.offloads input to underlying PMD only carries
> > +	 * those offloadings which are only enabled on this queue and
> > +	 * not enabled on all queues.
> > +	 * The underlying PMD must be aware of this point.
> > +	 */
> > +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
> > +
> >  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
> >  					      socket_id, &local_conf, mp);
> >  	if (!ret) {
> > @@ -1549,6 +1635,8 @@ rte_eth_tx_queue_setup(uint16_t port_id,
> > uint16_t tx_queue_id,
> >  	struct rte_eth_dev_info dev_info;
> >  	struct rte_eth_txconf local_conf;
> >  	void **txq;
> > +	uint64_t pure_port_offload_capa;
> > +	uint64_t only_enabled_for_queue;
> >
> >  	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
> >
> > @@ -1612,6 +1700,68 @@ rte_eth_tx_queue_setup(uint16_t port_id,
> > uint16_t tx_queue_id,
> >  					  &local_conf.offloads);
> >  	}
> >
> > +	/*
> > +	 * The requested offloadings by application for this queue
> > +	 * can be per-queue type or per-port type. and
> > +	 * they must be within the device offloading capabilities.
> > +	 */
> > +	if ((local_conf.offloads & dev_info.tx_offload_capa) !=
> > +	     local_conf.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> > tx_queue_id=%d "
> > +				    "Requested offload 0x%" PRIx64 "doesn't "
> > +				    "match per-queue capability 0x%" PRIx64
> > +				    " in %s\n",
> > +				    port_id,
> > +				    tx_queue_id,
> > +				    local_conf.offloads,
> > +				    dev_info.tx_queue_offload_capa,
> > +				    __func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/*
> > +	 * A pure per-port offloading can't be enabled for any queue
> > +	 * if it hasn't been enabled in rte_eth_dev_configure( ).
> > +	 *
> > +	 * Following pure_port_offload_capa is the capabilities which
> > +	 * can't be enabled on some queue while disabled on other queue.
> > +	 * pure_port_offload_capa must be enabled or disabled on all
> > +	 * queues at same time.
> > +	 *
> > +	 * Following only_enabled_for_queue is the offloadings which
> > +	 * are enabled for this queue but hasn't been enabled in
> > +	 * rte_eth_dev_configure( ).
> > +	 */
> > +	pure_port_offload_capa = dev_info.tx_offload_capa ^
> > +				 dev_info.tx_queue_offload_capa;
> > +	only_enabled_for_queue = (local_conf.offloads ^
> > +		dev->data->dev_conf.txmode.offloads) &
> > local_conf.offloads;
> 
> Same comments as in the Rx part.
> 
> > +	if (only_enabled_for_queue & pure_port_offload_capa) {
> > +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
> > tx_queue_id=%d, only "
> > +				    "enabled offload 0x%" PRIx64 "for this "
> > +				    "queue haven't been enabled in "
> > +				    "dev_configure( ), they are within "
> > +				    "pure per-port capabilities 0x%" PRIx64
> > +				    " in %s\n",
> > +				    port_id,
> > +				    tx_queue_id,
> > +				    only_enabled_for_queue,
> > +				    pure_port_offload_capa,
> > +				    __func__);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/*
> > +	 * If an offloading has already been enabled in
> > +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> > +	 * so there is no need to enable it in this queue again.
> > +	 * The local_conf.offloads input to underlying PMD only carries
> > +	 * those offloadings which are only enabled on this queue and
> > +	 * not enabled on all queues.
> > +	 * The underlying PMD must be aware of this point.
> > +	 */
> > +	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
> > +
> >  	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
> >  		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));  }
> > --
> > 2.7.5
> 
> 
> As for Ferruh's comment
> >
> > PMDs needs to be updated for:
> > 1- Remove existing offload verify checks
> > 2- Update offload configure logic based on new values
> >
> > (1) can be part of this patch. But PMD maintainers should send update
> > for (2) if a change required.
> >
> >cc'ed Shahaf, specially for (2) one.
> 
> I think PMD maintainers can help with that. If it will be integrated enough
> time before the release Mellanox PMDs can be converted by us.
> 
> 
> 
> 
> [1]
> http://dpdk.org/dev/patchwork/patch/38645/
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-04 14:02         ` [dpdk-dev] [PATCH v7] " Wei Dai
  2018-05-04 14:42           ` Ferruh Yigit
  2018-05-05 18:59           ` Shahaf Shuler
@ 2018-05-08 10:05           ` Wei Dai
  2018-05-08 10:41             ` Andrew Rybchenko
                               ` (3 more replies)
  2018-05-08 10:10           ` [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads Wei Dai
  3 siblings, 4 replies; 60+ messages in thread
From: Wei Dai @ 2018-05-08 10:05 UTC (permalink / raw)
  To: ferruh.yigit, thomas, declan.doherty, linville, mw, mk, gtzalik,
	evgenys, ravi1.kumar, shepard.siegel, ed.czeck, john.miller,
	ajit.khaparde, somnath.kotur, jerin.jacob, maciej.czekaj,
	shijith.thotton, ssrinivasan, santosh.shukla, rahul.lakkireddy,
	ohndale, hyonkim, wenzhuo.lu, konstantin.ananyev, beilei.xing,
	qi.z.zhang, xiao.w.wang, jingjing.wu, tdu, dima, nsamsono,
	jianbo.liu, adrien.mazarguil, nelio.laranjeiro, yskoh, matan,
	vido, alejandro.lucero, emant.agrawal, shreyansh.jain,
	hemant.agrawal, harish.patil, rasesh.mody, asesh.mody,
	shahed.shaikh, arybchenko, yongwang, maxime.coquelin, mtetsuyah,
	tiwei.bie, allain.legacy, matt.peters, pascal.mazon,
	bruce.richardson, gaetan.rivet, jasvinder.singh,
	cristian.dumitrescu
  Cc: dev, Wei Dai

This patch checks if an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If any offloading is enabled in rte_eth_dev_configure( ) by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If a per-queue offloading hasn't been enabled in rte_eth_dev_configure( ),
it can be enabled or disabled for an individual queue in
rte_eth_[rt]x_queue_setup( ).
A newly added offloading is one which hasn't been enabled in
rte_eth_dev_configure( ) and is requested to be enabled in
rte_eth_[rt]x_queue_setup( ); it must be per-queue type,
otherwise return an error.
The underlying PMD must be aware that the requested offloadings
to PMD specific queue_setup( ) function only carries those
new added offloadings of per-queue type.

This patch can make above such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

This patch assumes that all PMDs in 18.05-rc2 have already been
converted to the offload API defined in 17.11. It also assumes
that all PMDs can return correct offloading capabilities
in rte_eth_dev_infos_get( ).

In the beginning of [rt]x_queue_setup( ) of the underlying PMD,
add offloads = [rt]xconf->offloads |
dev->data->dev_conf.[rt]xmode.offloads; to keep the same behavior as
the offload API defined in 17.11 and to avoid breaking upper
applications due to the offload API change.
PMD can use the info that input [rt]xconf->offloads only carry
the new added per-queue offloads to do some optimization or some
code change on base of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

---
v8:
Revise PMD codes to comply with offload API in v7
update document

v7:
Give the maximum freedom for upper application,
only minimal checking is performed in ethdev layer.
Only requested specific pure per-queue offloadings are input
to underlying PMD.

v6:
No need enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to the decision on the offloading API in the community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 doc/guides/prog_guide/poll_mode_drv.rst |  26 +++--
 doc/guides/rel_notes/release_18_05.rst  |   8 ++
 drivers/net/avf/avf_rxtx.c              |   5 +-
 drivers/net/bnxt/bnxt_ethdev.c          |  17 ----
 drivers/net/cxgbe/cxgbe_ethdev.c        |  50 +---------
 drivers/net/dpaa/dpaa_ethdev.c          |  16 ----
 drivers/net/dpaa2/dpaa2_ethdev.c        |  16 ----
 drivers/net/e1000/em_ethdev.c           |  19 ----
 drivers/net/e1000/em_rxtx.c             |  64 ++-----------
 drivers/net/e1000/igb_rxtx.c            |  64 ++-----------
 drivers/net/ena/ena_ethdev.c            |  65 +------------
 drivers/net/failsafe/failsafe_ops.c     |  81 ----------------
 drivers/net/fm10k/fm10k_ethdev.c        |  82 ++--------------
 drivers/net/i40e/i40e_rxtx.c            |  58 ++----------
 drivers/net/ixgbe/ixgbe_ethdev.c        |  38 --------
 drivers/net/ixgbe/ixgbe_rxtx.c          |  66 ++-----------
 drivers/net/mlx4/mlx4_rxq.c             |  43 ++-------
 drivers/net/mlx4/mlx4_txq.c             |  42 ++------
 drivers/net/mlx5/mlx5_ethdev.c          |  22 -----
 drivers/net/mlx5/mlx5_rxq.c             |  50 ++--------
 drivers/net/mlx5/mlx5_txq.c             |  44 +--------
 drivers/net/mvpp2/mrvl_ethdev.c         |  97 +------------------
 drivers/net/nfp/nfp_net.c               | 163 --------------------------------
 drivers/net/octeontx/octeontx_ethdev.c  |  72 +-------------
 drivers/net/sfc/sfc_ethdev.c            |   9 +-
 drivers/net/sfc/sfc_rx.c                |  42 ++------
 drivers/net/sfc/sfc_rx.h                |   3 +-
 drivers/net/sfc/sfc_tx.c                |  42 ++------
 drivers/net/sfc/sfc_tx.h                |   3 +-
 drivers/net/tap/rte_eth_tap.c           |  88 ++---------------
 drivers/net/thunderx/nicvf_ethdev.c     |  70 ++------------
 drivers/net/virtio/virtio_rxtx.c        |   9 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c    |  16 ----
 drivers/net/vmxnet3/vmxnet3_rxtx.c      |   8 +-
 lib/librte_ethdev/rte_ethdev.c          |  88 +++++++++++++++++
 35 files changed, 240 insertions(+), 1346 deletions(-)

diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 09a93ba..56483fb 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -297,16 +297,30 @@ Per-Port and Per-Queue Offloads
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
+A per-queue offloading can be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offloading must be enabled or disabled on all queues at the same time.
+A per-port offloading can be enabled or disabled on all queues at the same time.
+It is certain that both per-queue and pure per-port offloading are per-port type.
 The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
+The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
+The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
-Per-port offload configuration is set using ``rte_eth_dev_configure``.
-Per-queue offload configuration is set using ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup``.
-To enable per-port offload, the offload should be set on both device configuration and queue setup.
-In case of a mixed configuration the queue setup shall return with an error.
-To enable per-queue offload, the offload can be set only on the queue setup.
-Offloads which are not enabled are disabled by default.
+Any requested offloading by application must be within the device capabilities.
+Any offloading is disabled by default if it is not set in the parameter
+dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure( )`` and
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
+If any offloading is enabled in ``rte_eth_dev_configure( )`` by application,
+it is enabled on all queues no matter whether it is per-queue or
+per-port type and no matter whether it is set or cleared in
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
+If a per-queue offloading hasn't been enabled in ``rte_eth_dev_configure( )``,
+it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup( )`` for individual queue.
+A new added offloads in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )`` input by application
+is the one which hasn't been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled
+in ``rte_eth_[rt]x_queue_setup( )``, it must be per-queue type, otherwise return error.
 
 For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in ``rte_eth_txconf`` struct.
 In such cases it is not required to set other flags in ``txq_flags``.
diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
index 0ae61e8..637e684 100644
--- a/doc/guides/rel_notes/release_18_05.rst
+++ b/doc/guides/rel_notes/release_18_05.rst
@@ -303,6 +303,14 @@ API Changes
   * ``rte_flow_create()`` API count action now requires the ``struct rte_flow_action_count``.
   * ``rte_flow_query()`` API parameter changed from action type to action structure.
 
+* **ethdev: changes to offload API**
+
+   A pure per-port offloading isn't requested to be repeated in [rt]x_conf->offloads to
+   ``rte_eth_[rt]x_queue_setup( )``. Now any offloading enabled in ``rte_eth_dev_configure( )``
+   can't be disabled by ``rte_eth_[rt]x_queue_setup( )``. Any new added offloading which has
+   not been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled in
+   ``rte_eth_[rt]x_queue_setup( )`` must be per-queue type, otherwise return error.
+
 
 ABI Changes
 -----------
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index 1824ed7..e03a136 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t i, base, bsf, tc_mapping;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
 	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
 	    nb_desc > AVF_MAX_RING_DESC ||
 	    nb_desc < AVF_MIN_RING_DESC) {
@@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->free_thresh = tx_free_thresh;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	/* Allocate software ring */
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 348129d..d00b99f 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -500,25 +500,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
-	uint64_t tx_offloads = eth_dev->data->dev_conf.txmode.offloads;
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (tx_offloads != (tx_offloads & BNXT_DEV_TX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Tx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			 tx_offloads, BNXT_DEV_TX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
-	if (rx_offloads != (rx_offloads & BNXT_DEV_RX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Rx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			    rx_offloads, BNXT_DEV_RX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
 
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 3df51b5..fadf684 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -366,31 +366,15 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
-	uint64_t unsupported_offloads, configured_offloads;
+	uint64_t configured_offloads;
 	int err;
 
 	CXGBE_FUNC_TRACE();
 	configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
-	configured_offloads = eth_dev->data->dev_conf.txmode.offloads;
-	unsupported_offloads = configured_offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
+		eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
@@ -440,7 +424,7 @@ int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_txconf *tx_conf)
+			     const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
@@ -448,15 +432,6 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
 	int err = 0;
 	unsigned int temp_nb_desc;
-	uint64_t unsupported_offloads;
-
-	unsupported_offloads = tx_conf->offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
 		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
@@ -553,7 +528,7 @@ int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_rxconf *rx_conf,
+			     const struct rte_eth_rxconf *rx_conf __rte_unused,
 			     struct rte_mempool *mp)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -565,21 +540,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	unsigned int temp_nb_desc;
 	struct rte_eth_dev_info dev_info;
 	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
-	uint64_t unsupported_offloads, configured_offloads;
-
-	configured_offloads = rx_conf->offloads;
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
 		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 6bf8c15..199afdd 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -176,14 +176,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -192,14 +184,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c304b82..de8d83a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -309,14 +309,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA2_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA2_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -325,14 +317,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA2_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA2_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 694a624..4e890ad 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -454,29 +454,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 
-	eth_em_infos_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 2b3c63e..a6b3e92 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1183,22 +1183,6 @@ em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return tx_queue_offload_capa;
 }
 
-static int
-em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1211,21 +1195,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t tsize;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			em_get_tx_port_offloads_capa(dev),
-			em_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -1330,7 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	return 0;
 }
 
@@ -1412,22 +1386,6 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1440,21 +1398,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	struct em_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	uint32_t rsize;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			em_get_rx_port_offloads_capa(dev),
-			em_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -1523,7 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a3776a0..128ed0b 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1475,22 +1475,6 @@ igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1502,19 +1486,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_tx_queue *txq;
 	struct e1000_hw     *hw;
 	uint32_t size;
+	uint64_t offloads;
 
-	if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			igb_get_tx_port_offloads_capa(dev),
-			igb_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1599,7 +1573,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	return 0;
 }
@@ -1690,22 +1664,6 @@ igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1718,19 +1676,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	unsigned int size;
+	uint64_t offloads;
 
-	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			igb_get_rx_port_offloads_capa(dev),
-			igb_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1756,7 +1704,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 41b5638..c595cc7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -238,10 +238,6 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -1005,12 +1001,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
-	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1065,7 +1055,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
@@ -1078,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      const struct rte_eth_rxconf *rx_conf,
+			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1114,11 +1104,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1422,22 +1407,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
-	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    tx_offloads, adapter->tx_supported_offloads);
-		return -ENOTSUP;
-	}
-
-	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    rx_offloads, adapter->rx_supported_offloads);
-		return -ENOTSUP;
-	}
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1459,8 +1428,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		break;
 	}
 
-	adapter->tx_selected_offloads = tx_offloads;
-	adapter->rx_selected_offloads = rx_offloads;
+	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
 	return 0;
 }
 
@@ -1489,32 +1458,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->tx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->rx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 6d44884..368d23f 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -90,22 +90,10 @@ static int
 fs_dev_configure(struct rte_eth_dev *dev)
 {
 	struct sub_device *sdev;
-	uint64_t supp_tx_offloads;
-	uint64_t tx_offloads;
 	uint8_t i;
 	int ret;
 
 	fs_lock(dev, 0);
-	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		ERROR("Some Tx offloads are not supported, "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-		      tx_offloads, supp_tx_offloads);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	FOREACH_SUBDEV(sdev, i, dev) {
 		int rmv_interrupt = 0;
 		int lsc_interrupt = 0;
@@ -297,25 +285,6 @@ fs_dev_close(struct rte_eth_dev *dev)
 	fs_unlock(dev, 0);
 }
 
-static bool
-fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.rxmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_rx_queue_release(void *queue)
 {
@@ -368,19 +337,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
 		fs_rx_queue_release(rxq);
 		dev->data->rx_queues[rx_queue_id] = NULL;
 	}
-	/* Verify application offloads are valid for our port and queue. */
-	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Rx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      rx_conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      PRIV(dev)->infos.rx_offload_capa |
-		      PRIV(dev)->infos.rx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	rxq = rte_zmalloc(NULL,
 			  sizeof(*rxq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -499,25 +455,6 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 	return rc;
 }
 
-static bool
-fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.txmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_tx_queue_release(void *queue)
 {
@@ -557,24 +494,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 		fs_tx_queue_release(txq);
 		dev->data->tx_queues[tx_queue_id] = NULL;
 	}
-	/*
-	 * Don't verify queue offloads for applications which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Tx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      tx_conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      PRIV(dev)->infos.tx_offload_capa |
-		      PRIV(dev)->infos.tx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	txq = rte_zmalloc("ethdev TX queue",
 			  sizeof(*txq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7dfeddf..7a59530 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -448,29 +448,13 @@ static int
 fm10k_dev_configure(struct rte_eth_dev *dev)
 {
 	int ret;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0)
+	if ((dev->data->dev_conf.rxmode.offloads &
+	     DEV_RX_OFFLOAD_CRC_STRIP) == 0)
 		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
 
-	fm10k_dev_infos_get(dev, &dev_info);
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* multipe queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
 	if (ret != 0) {
@@ -1827,22 +1811,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = fm10k_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
@@ -1852,20 +1820,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	struct fm10k_rx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_rx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			fm10k_get_rx_port_offloads_capa(dev),
-			fm10k_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/* make sure the mempool element size can account for alignment. */
 	if (!mempool_element_size_valid(mp)) {
@@ -1911,7 +1870,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->queue_id = queue_id;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	if (handle_rxconf(q, conf))
 		return -EINVAL;
 
@@ -2040,22 +1999,6 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = fm10k_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_txconf *conf)
@@ -2063,20 +2006,11 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_tx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_tx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			fm10k_get_tx_port_offloads_capa(dev),
-			fm10k_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* make sure a valid number of descriptors have been requested */
 	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
@@ -2115,7 +2049,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->port_id = dev->data->port_id;
 	q->queue_id = queue_id;
 	q->txq_flags = conf->txq_flags;
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	q->ops = &def_txq_ops;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 62985c3..05b4950 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1690,20 +1690,6 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 }
 
 static int
-i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
-	if ((requested & dev_info.rx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_first_queue(uint16_t idx, void **queues, int num)
 {
 	uint16_t i;
@@ -1792,18 +1778,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1857,7 +1834,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/* Allocate the maximun number of RX ring hardware descriptor. */
 	len = I40E_MAX_RING_DESC;
@@ -2075,20 +2052,6 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 static int
-i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
-	if ((requested & dev_info.tx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 				struct i40e_tx_queue *txq)
 {
@@ -2151,18 +2114,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t reg_idx, i, base, bsf, tc_mapping;
 	int q_offset;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			dev_info.tx_offload_capa);
-			return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2297,7 +2251,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->queue_id = queue_idx;
 	txq->reg_idx = reg_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 91179e9..320ab21 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2365,9 +2365,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2379,22 +2376,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	ixgbe_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -4965,29 +4946,10 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	ixgbevf_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2892436..7de6f00 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2448,22 +2448,6 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	return tx_offload_capa;
 }
 
-static int
-ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2475,25 +2459,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct ixgbe_tx_queue *txq;
 	struct ixgbe_hw     *hw;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			ixgbe_get_tx_queue_offloads(dev),
-			ixgbe_get_tx_port_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -2621,7 +2592,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -2915,22 +2886,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	return offloads;
 }
 
-static int
-ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2945,21 +2900,12 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			ixgbe_get_rx_port_offloads(dev),
-			ixgbe_get_rx_queue_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -2994,7 +2940,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 65f0994..35c44ff 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -693,26 +693,6 @@ mlx4_get_rx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = mlx4_get_rx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Rx queue.
  *
  * @param dev
@@ -754,20 +734,13 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	};
 	int ret;
 	uint32_t crc_present;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
-	(void)conf; /* Thresholds configuration (ignored). */
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      (mlx4_get_rx_port_offloads(priv) |
-		       mlx4_get_rx_queue_offloads(priv)));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_rx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -793,7 +766,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		     (void *)dev, idx, desc);
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		crc_present = 0;
 	} else if (priv->hw_fcs_strip) {
 		crc_present = 1;
@@ -825,9 +798,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -840,7 +813,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		uint32_t size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index fe6a8e0..2443333 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -180,26 +180,6 @@ mlx4_get_tx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = mlx4_get_tx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Tx queue.
  *
  * @param dev
@@ -246,23 +226,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		},
 	};
 	int ret;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      mlx4_get_tx_port_offloads(priv));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_tx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -313,11 +283,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
 					   DEV_TX_OFFLOAD_UDP_CKSUM |
 					   DEV_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads &
+			      (offloads &
 			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 746b94f..df369cd 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -330,30 +330,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	unsigned int reta_idx_n;
 	const uint8_t use_app_rss_key =
 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-	uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t supp_rx_offloads =
-		(mlx5_get_rx_port_offloads() |
-		 mlx5_get_rx_queue_offloads(dev));
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret = 0;
 
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Tx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, tx_offloads, supp_tx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
-	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Rx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, rx_offloads, supp_rx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (use_app_rss_key &&
 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
 	     rss_hash_default_key_len)) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 126412d..cea93cf 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -237,32 +237,6 @@ mlx5_get_rx_port_offloads(void)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
-	uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return 0;
-	if (((port_offloads ^ offloads) & port_supp_offloads))
-		return 0;
-	return 1;
-}
-
-/**
  *
  * @param dev
  *   Pointer to Ethernet device structure.
@@ -305,18 +279,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = EOVERFLOW;
 		return -rte_errno;
 	}
-	if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
-		DRV_LOG(ERR,
-			"port %u Rx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(mlx5_get_rx_port_offloads() |
-			 mlx5_get_rx_queue_offloads(dev)));
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (!mlx5_rxq_releasable(dev, idx)) {
 		DRV_LOG(ERR, "port %u unable to release queue index %u",
 			dev->data->port_id, idx);
@@ -980,6 +942,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 */
 	const uint16_t desc_n =
 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+	uint64_t offloads = conf->offloads |
+			   dev->data->dev_conf.rxmode.offloads;
 
 	tmpl = rte_calloc_socket("RXQ", 1,
 				 sizeof(*tmpl) +
@@ -997,7 +961,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		tmpl->rxq.sges_n = 0;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1044,12 +1008,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
-	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		tmpl->rxq.crc_present = 0;
 	} else if (config->hw_fcs_strip) {
 		tmpl->rxq.crc_present = 1;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4435874..fb7b4ad 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -127,31 +127,6 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
-
-	/* There are no Tx offloads which are per queue. */
-	if ((offloads & port_supp_offloads) != offloads)
-		return 0;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return 0;
-	return 1;
-}
-
-/**
  * DPDK callback to configure a TX queue.
  *
  * @param dev
@@ -177,22 +152,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		DRV_LOG(ERR,
-			"port %u Tx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			mlx5_get_tx_port_offloads(dev));
-		return -rte_errno;
-	}
 	if (desc <= MLX5_TX_COMP_THRESH) {
 		DRV_LOG(WARNING,
 			"port %u number of descriptors requested for Tx queue"
@@ -810,7 +769,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl->txq.offloads = conf->offloads;
+	tmpl->txq.offloads = conf->offloads |
+			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 05998bf..c9d85ca 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -318,26 +318,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.split_hdr_size) {
 		RTE_LOG(INFO, PMD, "Split headers not supported\n");
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
-		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
-		return -EINVAL;
-	}
-
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		RTE_LOG(INFO, PMD, "LRO not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 ETHER_HDR_LEN - ETHER_CRC_LEN;
@@ -1522,42 +1507,6 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
 }
 
 /**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = MRVL_RX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the receive queue.
  *
  * @param dev
@@ -1587,9 +1536,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	uint32_t min_size,
 		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	int ret, tc, inq;
+	uint64_t offloads;
 
-	if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
 		/*
@@ -1622,8 +1571,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled =
-		dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1686,42 +1634,6 @@ mrvl_rx_queue_release(void *rxq)
 }
 
 /**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = MRVL_TX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the transmit queue.
  *
  * @param dev
@@ -1746,9 +1658,6 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mrvl_priv *priv = dev->data->dev_private;
 	struct mrvl_txq *txq;
 
-	if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
-
 	if (dev->data->tx_queues[idx]) {
 		rte_free(dev->data->tx_queues[idx]);
 		dev->data->tx_queues[idx] = NULL;
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 048324e..d3b8ec0 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -412,148 +412,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX offloads */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
-		PMD_INIT_LOG(INFO, "rxmode does not support split header");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
-		PMD_INIT_LOG(INFO, "RXCSUM not supported");
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-		PMD_INIT_LOG(INFO, "VLAN filter not supported");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
-		PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		PMD_INIT_LOG(INFO, "LRO not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
-		PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
 	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
 		PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
 
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
-		PMD_INIT_LOG(INFO, "Scatter not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
-		PMD_INIT_LOG(INFO, "timestamp offfload not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "security offload not supported");
-		return -EINVAL;
-	}
-
-	/* checking TX offloads */
-	if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
-		PMD_INIT_LOG(INFO, "vlan insert offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
-		PMD_INIT_LOG(INFO, "TX checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) {
-		PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
-		PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
-		PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
-		PMD_INIT_LOG(INFO, "tunneling offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
-		PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
-		PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
-		PMD_INIT_LOG(INFO, "TX multisegs  not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
-		PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "TX security offload not supported");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -1600,8 +1461,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	const struct rte_memzone *tz;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_rxmode *rxmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1615,17 +1474,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	rxmode = &dev_conf->rxmode;
-
-	if (rx_conf->offloads != rxmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u rx offloads not as port offloads",
-				  queue_idx);
-		PMD_DRV_LOG(ERR, "\tport: %" PRIx64 "", rxmode->offloads);
-		PMD_DRV_LOG(ERR, "\tqueue: %" PRIx64 "", rx_conf->offloads);
-		return -EINVAL;
-	}
-
 	/*
 	 * Free memory prior to re-allocation if needed. This is the case after
 	 * calling nfp_net_stop
@@ -1762,8 +1610,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	struct nfp_net_txq *txq;
 	uint16_t tx_free_thresh;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_txmode *txmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1777,15 +1623,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	txmode = &dev_conf->txmode;
-
-	if (tx_conf->offloads != txmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u tx offloads not as port offloads",
-				  queue_idx);
-		return -EINVAL;
-	}
-
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 				    tx_conf->tx_free_thresh :
 				    DEFAULT_TX_FREE_THRESH);
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 04120f5..4b14b8f 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -262,8 +262,6 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -285,38 +283,14 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	configured_offloads = rxmode->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
+		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	configured_offloads = txmode->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
+		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -738,14 +712,12 @@ octeontx_dev_tx_queue_release(void *tx_queue)
 static int
 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			    uint16_t nb_desc, unsigned int socket_id,
-			    const struct rte_eth_txconf *tx_conf)
+			    const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 	struct octeontx_txq *txq = NULL;
 	uint16_t dq_num;
 	int res = 0;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 	RTE_SET_USED(socket_id);
@@ -766,22 +738,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		dev->data->tx_queues[qidx] = NULL;
 	}
 
-	configured_offloads = tx_conf->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
-		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
 	/* Allocating tx queue data structure */
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
 				 RTE_CACHE_LINE_SIZE, nic->node);
@@ -837,8 +793,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint8_t gaura;
 	unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
 	unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 
@@ -861,22 +815,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	port = nic->port_id;
 
-	configured_offloads = rx_conf->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 	/* Rx deferred start is not supported */
 	if (rx_conf->rx_deferred_start) {
 		octeontx_log_err("rx deferred start not supported");
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index e42d553..fc2b254 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -413,14 +413,16 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
 		     rx_queue_id, nb_rx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
-			  rx_conf, mb_pool);
+			  rx_conf, mb_pool, offloads);
 	if (rc != 0)
 		goto fail_rx_qinit;
 
@@ -469,13 +471,16 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
 		     tx_queue_id, nb_tx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
-	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id,
+			  tx_conf, offloads);
 	if (rc != 0)
 		goto fail_tx_qinit;
 
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 57ed34f..dbdd000 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -830,32 +830,10 @@ sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 	}
 }
 
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
-			     sfc_rx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_rx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
 static int
 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
-		   const struct rte_eth_rxconf *rx_conf)
+		   const struct rte_eth_rxconf *rx_conf,
+		   uint64_t offloads)
 {
 	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
 				      sfc_rx_get_queue_offload_caps(sa);
@@ -880,17 +858,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
 		rc = EINVAL;
 	}
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+	if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
 	    DEV_RX_OFFLOAD_CHECKSUM)
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
 
 	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
 
-	if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -998,7 +973,8 @@ int
 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_rx_desc, unsigned int socket_id,
 	     const struct rte_eth_rxconf *rx_conf,
-	     struct rte_mempool *mb_pool)
+	     struct rte_mempool *mb_pool,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	struct sfc_rss *rss = &sa->rss;
@@ -1020,7 +996,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
 	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
 
-	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -1033,7 +1009,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	}
 
 	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
 			"object size is too small", sw_index);
 		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1056,7 +1032,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags =
-		(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 3fba7d8..2898fe5 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -138,7 +138,8 @@ void sfc_rx_stop(struct sfc_adapter *sa);
 int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
 		 uint16_t nb_rx_desc, unsigned int socket_id,
 		 const struct rte_eth_rxconf *rx_conf,
-		 struct rte_mempool *mb_pool);
+		 struct rte_mempool *mb_pool,
+		 uint64_t offloads);
 void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
 void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 1cd08d8..a4a21fa 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -90,31 +90,9 @@ sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 }
 
 static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
-			     sfc_tx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_tx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
-static int
 sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
-		   const struct rte_eth_txconf *tx_conf)
+		   const struct rte_eth_txconf *tx_conf,
+		   uint64_t offloads)
 {
 	int rc = 0;
 
@@ -138,15 +116,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
 
-	if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -160,7 +135,8 @@ sfc_tx_qflush_done(struct sfc_txq *txq)
 int
 sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
-	     const struct rte_eth_txconf *tx_conf)
+	     const struct rte_eth_txconf *tx_conf,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int txq_entries;
@@ -183,7 +159,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(txq_entries >= nb_tx_desc);
 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
 
-	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -210,7 +186,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
 		SFC_TX_DEFAULT_FREE_THRESH;
 	txq->flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
 			   socket_id, &txq->mem);
@@ -221,7 +197,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	info.max_fill_level = txq_max_fill_level;
 	info.free_thresh = txq->free_thresh;
 	info.flags = tx_conf->txq_flags;
-	info.offloads = tx_conf->offloads;
+	info.offloads = offloads;
 	info.txq_entries = txq_info->entries;
 	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
 	info.txq_hw_ring = txq->mem.esm_base;
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index c2e5f13..d2b2c4d 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -121,7 +121,8 @@ void sfc_tx_close(struct sfc_adapter *sa);
 
 int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		 uint16_t nb_tx_desc, unsigned int socket_id,
-		 const struct rte_eth_txconf *tx_conf);
+		 const struct rte_eth_txconf *tx_conf,
+		 uint64_t offloads);
 void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 
 void sfc_tx_qflush_done(struct sfc_txq *txq);
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 172a7ba..78fe89b 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -280,21 +280,6 @@ tap_rx_offload_get_queue_capa(void)
 	       DEV_RX_OFFLOAD_CRC_STRIP;
 }
 
-static bool
-tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 /* Callback to handle the rx burst of packets to the correct interface and
  * file descriptor(s) in a multi-queue setup.
  */
@@ -408,22 +393,6 @@ tap_tx_offload_get_queue_capa(void)
 	       DEV_TX_OFFLOAD_TCP_CKSUM;
 }
 
-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
 	       unsigned int l3_len)
@@ -668,18 +637,6 @@ tap_dev_stop(struct rte_eth_dev *dev)
 static int
 tap_dev_configure(struct rte_eth_dev *dev)
 {
-	uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa();
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, supp_tx_offloads);
-		return -rte_errno;
-	}
 	if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
 		TAP_LOG(ERR,
 			"%s: number of rx queues %d exceeds max num of queues %d",
@@ -1081,19 +1038,6 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}
 
-	/* Verify application offloads are valid for our port and queue. */
-	if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(tap_rx_offload_get_port_capa() |
-			 tap_rx_offload_get_queue_capa()));
-		return -rte_errno;
-	}
 	rxq->mp = mp;
 	rxq->trigger_seen = 1; /* force initial burst */
 	rxq->in_port = dev->data->port_id;
@@ -1157,35 +1101,19 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 	struct pmd_internals *internals = dev->data->dev_private;
 	struct tx_queue *txq;
 	int ret;
+	uint64_t offloads;
 
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -1;
 	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
 	txq = dev->data->tx_queues[tx_queue_id];
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
-		if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
-			txq->csum = !!(tx_conf->offloads &
-					(DEV_TX_OFFLOAD_IPV4_CKSUM |
-					 DEV_TX_OFFLOAD_UDP_CKSUM |
-					 DEV_TX_OFFLOAD_TCP_CKSUM));
-		} else {
-			rte_errno = ENOTSUP;
-			TAP_LOG(ERR,
-				"%p: Tx queue offloads 0x%" PRIx64
-				" don't match port offloads 0x%" PRIx64
-				" or supported offloads 0x%" PRIx64,
-				(void *)dev, tx_conf->offloads,
-				dev->data->dev_conf.txmode.offloads,
-				(tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa()));
-			return -rte_errno;
-		}
-	}
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->csum = !!(offloads &
+			(DEV_TX_OFFLOAD_IPV4_CKSUM |
+			 DEV_TX_OFFLOAD_UDP_CKSUM |
+			 DEV_TX_OFFLOAD_TCP_CKSUM));
+
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
 		return -1;
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index b673b47..23baa99 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -931,7 +931,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	bool is_single_pool;
 	struct nicvf_txq *txq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -945,17 +945,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-	conf_offloads = tx_conf->offloads;
-	offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	unsupported_offloads = conf_offloads & ~offload_capa;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Tx deferred start is not supported */
 	if (tx_conf->tx_deferred_start) {
 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,9 +996,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	txq->tx_free_thresh = tx_free_thresh;
 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
-	txq->offloads = conf_offloads;
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->offloads = offloads;
 
-	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1269,7 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint16_t rx_free_thresh;
 	struct nicvf_rxq *rxq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1283,24 +1273,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-
-	conf_offloads = rx_conf->offloads;
-
-	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	offload_capa = NICVF_RX_OFFLOAD_CAPA;
-	unsupported_offloads = conf_offloads & ~offload_capa;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Mempool memory must be contiguous, so must be one memory segment*/
 	if (mp->nb_mem_chunks != 1) {
 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1381,10 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_rx_queue_reset(rxq);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
-			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+			rte_mempool_avail_count(mp), rxq->phys, offloads);
 
 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1912,8 +1885,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint8_t cqcount;
-	uint64_t conf_rx_offloads, rx_offload_capa;
-	uint64_t conf_tx_offloads, tx_offload_capa;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1922,32 +1893,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
-	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_tx_offloads, tx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	conf_rx_offloads = rxmode->offloads;
-	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
-	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_rx_offloads, rx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
 		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
 		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a8aa87b..92fab21 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -385,10 +385,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
 			uint16_t nb_desc,
 			unsigned int socket_id __rte_unused,
-			const struct rte_eth_rxconf *rx_conf,
+			const struct rte_eth_rxconf *rx_conf __rte_unused,
 			struct rte_mempool *mp)
 {
-	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
@@ -408,10 +407,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			"Cannot allocate mbufs for rx virtqueue");
 	}
 
-	if ((rx_conf->offloads ^ rxmode->offloads) &
-	    VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS)
-		return -EINVAL;
-
 	dev->data->rx_queues[queue_idx] = rxvq;
 
 	return 0;
@@ -504,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
 	/* cannot use simple rxtx funcs with multisegs or offloads */
-	if (tx_conf->offloads)
+	if (dev->data->dev_conf.txmode.offloads)
 		hw->use_simple_tx = 0;
 
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index c850241..ba932ff 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -393,25 +393,9 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	const struct rte_memzone *mz;
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	size_t size;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & VMXNET3_RX_OFFLOAD_CAP) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested RX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			rx_offloads, (uint64_t)VMXNET3_RX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
-	if ((tx_offloads & VMXNET3_TX_OFFLOAD_CAP) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested TX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			tx_offloads, (uint64_t)VMXNET3_TX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
 		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index f6e2d98..cf85f3d 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1013,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			   uint16_t queue_idx,
 			   uint16_t nb_desc,
 			   unsigned int socket_id,
-			   const struct rte_eth_txconf *tx_conf)
+			   const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	const struct rte_memzone *mz;
@@ -1025,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
-	    ETH_TXQ_FLAGS_NOXSUMSCTP) {
-		PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (txq == NULL) {
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index e560524..523a07b 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offloading must be within its device capabilities */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capabilities 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capabilities 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/* Check that device supports requested rss hash functions. */
 	if ((dev_info.flow_type_rss_offloads |
 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
@@ -1504,6 +1526,39 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d, new "
+				    "added offloads 0x" PRIx64 " must be "
+				    "within pre-queue offload capabilities 0x"
+				    PRIx64 " in %s\n",
+				    port_id,
+				    rx_queue_id,
+				    local_conf.offloads,
+				    dev_info.rx_queue_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1612,6 +1667,39 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d, new "
+				    "added offloads 0x" PRIx64 " must be "
+				    "within pre-queue offload capabilities 0x"
+				    PRIx64 " in %s\n",
+				    port_id,
+				    tx_queue_id,
+				    local_conf.offloads,
+				    dev_info.tx_queue_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-04 14:02         ` [dpdk-dev] [PATCH v7] " Wei Dai
                             ` (2 preceding siblings ...)
  2018-05-08 10:05           ` [dpdk-dev] [PATCH v8] " Wei Dai
@ 2018-05-08 10:10           ` Wei Dai
  2018-05-08 17:51             ` Andrew Rybchenko
  3 siblings, 1 reply; 60+ messages in thread
From: Wei Dai @ 2018-05-08 10:10 UTC (permalink / raw)
  To: ferruh.yigit, thomas, shahafs, qi.z.zhang; +Cc: dev, Wei Dai

This patch checks if an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If any offloading is enabled in rte_eth_dev_configure( ) by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If a per-queue offloading hasn't been enabled in rte_eth_dev_configure( ),
it can be enabled or disabled for an individual queue in
rte_eth_[rt]x_queue_setup( ).
A newly added offloading is one which hasn't been enabled in
rte_eth_dev_configure( ) and is requested to be enabled in
rte_eth_[rt]x_queue_setup( ); it must be of per-queue type,
otherwise an error is returned.
The underlying PMD must be aware that the requested offloadings
passed to the PMD specific queue_setup( ) function only carry those
newly added offloadings of per-queue type.

This patch can make above such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

This patch assumes that all PMDs in 18.05-rc2 have already
converted to offload API defined in 17.11 . It also assumes
that all PMDs can return correct offloading capabilities
in rte_eth_dev_infos_get( ).

In the beginning of [rt]x_queue_setup( ) of underlying PMD,
add offloads = [rt]xconf->offloads |
dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
defined in 17.11 to avoid upper application broken due to offload
API change.
PMD can use the info that input [rt]xconf->offloads only carry
the new added per-queue offloads to do some optimization or some
code change on base of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

---
v8:
Revise PMD codes to comply with offload API in v7
update document

v7:
Give the maximum freedom for upper application,
only minimal checking is performed in ethdev layer.
Only requested specific pure per-queue offloadings are input
to underlying PMD.

v6:
No need enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to dicision of offloading API in community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 doc/guides/prog_guide/poll_mode_drv.rst |  26 +++--
 doc/guides/rel_notes/release_18_05.rst  |   8 ++
 drivers/net/avf/avf_rxtx.c              |   5 +-
 drivers/net/bnxt/bnxt_ethdev.c          |  17 ----
 drivers/net/cxgbe/cxgbe_ethdev.c        |  50 +---------
 drivers/net/dpaa/dpaa_ethdev.c          |  16 ----
 drivers/net/dpaa2/dpaa2_ethdev.c        |  16 ----
 drivers/net/e1000/em_ethdev.c           |  19 ----
 drivers/net/e1000/em_rxtx.c             |  64 ++-----------
 drivers/net/e1000/igb_rxtx.c            |  64 ++-----------
 drivers/net/ena/ena_ethdev.c            |  65 +------------
 drivers/net/failsafe/failsafe_ops.c     |  81 ----------------
 drivers/net/fm10k/fm10k_ethdev.c        |  82 ++--------------
 drivers/net/i40e/i40e_rxtx.c            |  58 ++----------
 drivers/net/ixgbe/ixgbe_ethdev.c        |  38 --------
 drivers/net/ixgbe/ixgbe_rxtx.c          |  66 ++-----------
 drivers/net/mlx4/mlx4_rxq.c             |  43 ++-------
 drivers/net/mlx4/mlx4_txq.c             |  42 ++------
 drivers/net/mlx5/mlx5_ethdev.c          |  22 -----
 drivers/net/mlx5/mlx5_rxq.c             |  50 ++--------
 drivers/net/mlx5/mlx5_txq.c             |  44 +--------
 drivers/net/mvpp2/mrvl_ethdev.c         |  97 +------------------
 drivers/net/nfp/nfp_net.c               | 163 --------------------------------
 drivers/net/octeontx/octeontx_ethdev.c  |  72 +-------------
 drivers/net/sfc/sfc_ethdev.c            |   9 +-
 drivers/net/sfc/sfc_rx.c                |  42 ++------
 drivers/net/sfc/sfc_rx.h                |   3 +-
 drivers/net/sfc/sfc_tx.c                |  42 ++------
 drivers/net/sfc/sfc_tx.h                |   3 +-
 drivers/net/tap/rte_eth_tap.c           |  88 ++---------------
 drivers/net/thunderx/nicvf_ethdev.c     |  70 ++------------
 drivers/net/virtio/virtio_rxtx.c        |   9 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c    |  16 ----
 drivers/net/vmxnet3/vmxnet3_rxtx.c      |   8 +-
 lib/librte_ethdev/rte_ethdev.c          |  88 +++++++++++++++++
 35 files changed, 240 insertions(+), 1346 deletions(-)

diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 09a93ba..56483fb 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -297,16 +297,30 @@ Per-Port and Per-Queue Offloads
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
+A per-queue offloading can be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offloading must be enabled or disabled on all queues at the same time.
+A per-port offloading can be enabled or disabled on all queues at the same time.
+It is certain that both per-queue and pure per-port offloading are per-port type.
 The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
+The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
+The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
-Per-port offload configuration is set using ``rte_eth_dev_configure``.
-Per-queue offload configuration is set using ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup``.
-To enable per-port offload, the offload should be set on both device configuration and queue setup.
-In case of a mixed configuration the queue setup shall return with an error.
-To enable per-queue offload, the offload can be set only on the queue setup.
-Offloads which are not enabled are disabled by default.
+Any requested offloading by application must be within the device capabilities.
+Any offloading is disabled by default if it is not set in the parameter
+dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure( )`` and
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
+If any offloading is enabled in ``rte_eth_dev_configure( )`` by application,
+it is enabled on all queues no matter whether it is per-queue or
+per-port type and no matter whether it is set or cleared in
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
+If a per-queue offloading hasn't been enabled in ``rte_eth_dev_configure( )``,
+it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup( )`` for individual queue.
+A newly added offload in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )`` input by the application
+is one which hasn't been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled
+in ``rte_eth_[rt]x_queue_setup( )``; it must be of per-queue type, otherwise an error is returned.
 
 For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in ``rte_eth_txconf`` struct.
 In such cases it is not required to set other flags in ``txq_flags``.
diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
index 0ae61e8..637e684 100644
--- a/doc/guides/rel_notes/release_18_05.rst
+++ b/doc/guides/rel_notes/release_18_05.rst
@@ -303,6 +303,14 @@ API Changes
   * ``rte_flow_create()`` API count action now requires the ``struct rte_flow_action_count``.
   * ``rte_flow_query()`` API parameter changed from action type to action structure.
 
+* **ethdev: changes to offload API**
+
+   A pure per-port offloading isn't required to be repeated in [rt]x_conf->offloads to
+   ``rte_eth_[rt]x_queue_setup( )``. Now any offloading enabled in ``rte_eth_dev_configure( )``
+   can't be disabled by ``rte_eth_[rt]x_queue_setup( )``. Any newly added offloading which has
+   not been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled in
+   ``rte_eth_[rt]x_queue_setup( )`` must be of per-queue type, otherwise an error is returned.
+
 
 ABI Changes
 -----------
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index 1824ed7..e03a136 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t i, base, bsf, tc_mapping;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
 	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
 	    nb_desc > AVF_MAX_RING_DESC ||
 	    nb_desc < AVF_MIN_RING_DESC) {
@@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->free_thresh = tx_free_thresh;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	/* Allocate software ring */
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 348129d..d00b99f 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -500,25 +500,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
-	uint64_t tx_offloads = eth_dev->data->dev_conf.txmode.offloads;
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (tx_offloads != (tx_offloads & BNXT_DEV_TX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Tx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			 tx_offloads, BNXT_DEV_TX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
-	if (rx_offloads != (rx_offloads & BNXT_DEV_RX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Rx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			    rx_offloads, BNXT_DEV_RX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
 
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 3df51b5..fadf684 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -366,31 +366,15 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
-	uint64_t unsupported_offloads, configured_offloads;
+	uint64_t configured_offloads;
 	int err;
 
 	CXGBE_FUNC_TRACE();
 	configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
-	configured_offloads = eth_dev->data->dev_conf.txmode.offloads;
-	unsupported_offloads = configured_offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
+		eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
@@ -440,7 +424,7 @@ int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_txconf *tx_conf)
+			     const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
@@ -448,15 +432,6 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
 	int err = 0;
 	unsigned int temp_nb_desc;
-	uint64_t unsupported_offloads;
-
-	unsupported_offloads = tx_conf->offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
 		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
@@ -553,7 +528,7 @@ int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_rxconf *rx_conf,
+			     const struct rte_eth_rxconf *rx_conf __rte_unused,
 			     struct rte_mempool *mp)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -565,21 +540,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	unsigned int temp_nb_desc;
 	struct rte_eth_dev_info dev_info;
 	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
-	uint64_t unsupported_offloads, configured_offloads;
-
-	configured_offloads = rx_conf->offloads;
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
 		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 6bf8c15..199afdd 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -176,14 +176,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -192,14 +184,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c304b82..de8d83a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -309,14 +309,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA2_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA2_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -325,14 +317,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA2_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA2_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 694a624..4e890ad 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -454,29 +454,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 
-	eth_em_infos_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 2b3c63e..a6b3e92 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1183,22 +1183,6 @@ em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return tx_queue_offload_capa;
 }
 
-static int
-em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1211,21 +1195,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t tsize;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			em_get_tx_port_offloads_capa(dev),
-			em_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -1330,7 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	return 0;
 }
 
@@ -1412,22 +1386,6 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1440,21 +1398,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	struct em_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	uint32_t rsize;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			em_get_rx_port_offloads_capa(dev),
-			em_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -1523,7 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a3776a0..128ed0b 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1475,22 +1475,6 @@ igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1502,19 +1486,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_tx_queue *txq;
 	struct e1000_hw     *hw;
 	uint32_t size;
+	uint64_t offloads;
 
-	if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			igb_get_tx_port_offloads_capa(dev),
-			igb_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1599,7 +1573,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	return 0;
 }
@@ -1690,22 +1664,6 @@ igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1718,19 +1676,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	unsigned int size;
+	uint64_t offloads;
 
-	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			igb_get_rx_port_offloads_capa(dev),
-			igb_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1756,7 +1704,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 41b5638..c595cc7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -238,10 +238,6 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -1005,12 +1001,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
-	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1065,7 +1055,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
@@ -1078,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      const struct rte_eth_rxconf *rx_conf,
+			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1114,11 +1104,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1422,22 +1407,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
-	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    tx_offloads, adapter->tx_supported_offloads);
-		return -ENOTSUP;
-	}
-
-	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    rx_offloads, adapter->rx_supported_offloads);
-		return -ENOTSUP;
-	}
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1459,8 +1428,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		break;
 	}
 
-	adapter->tx_selected_offloads = tx_offloads;
-	adapter->rx_selected_offloads = rx_offloads;
+	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
 	return 0;
 }
 
@@ -1489,32 +1458,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->tx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->rx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 6d44884..368d23f 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -90,22 +90,10 @@ static int
 fs_dev_configure(struct rte_eth_dev *dev)
 {
 	struct sub_device *sdev;
-	uint64_t supp_tx_offloads;
-	uint64_t tx_offloads;
 	uint8_t i;
 	int ret;
 
 	fs_lock(dev, 0);
-	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		ERROR("Some Tx offloads are not supported, "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-		      tx_offloads, supp_tx_offloads);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	FOREACH_SUBDEV(sdev, i, dev) {
 		int rmv_interrupt = 0;
 		int lsc_interrupt = 0;
@@ -297,25 +285,6 @@ fs_dev_close(struct rte_eth_dev *dev)
 	fs_unlock(dev, 0);
 }
 
-static bool
-fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.rxmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_rx_queue_release(void *queue)
 {
@@ -368,19 +337,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
 		fs_rx_queue_release(rxq);
 		dev->data->rx_queues[rx_queue_id] = NULL;
 	}
-	/* Verify application offloads are valid for our port and queue. */
-	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Rx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      rx_conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      PRIV(dev)->infos.rx_offload_capa |
-		      PRIV(dev)->infos.rx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	rxq = rte_zmalloc(NULL,
 			  sizeof(*rxq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -499,25 +455,6 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 	return rc;
 }
 
-static bool
-fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.txmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_tx_queue_release(void *queue)
 {
@@ -557,24 +494,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 		fs_tx_queue_release(txq);
 		dev->data->tx_queues[tx_queue_id] = NULL;
 	}
-	/*
-	 * Don't verify queue offloads for applications which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Tx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      tx_conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      PRIV(dev)->infos.tx_offload_capa |
-		      PRIV(dev)->infos.tx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	txq = rte_zmalloc("ethdev TX queue",
 			  sizeof(*txq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7dfeddf..7a59530 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -448,29 +448,13 @@ static int
 fm10k_dev_configure(struct rte_eth_dev *dev)
 {
 	int ret;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0)
+	if ((dev->data->dev_conf.rxmode.offloads &
+	     DEV_RX_OFFLOAD_CRC_STRIP) == 0)
 		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
 
-	fm10k_dev_infos_get(dev, &dev_info);
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* multipe queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
 	if (ret != 0) {
@@ -1827,22 +1811,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = fm10k_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
@@ -1852,20 +1820,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	struct fm10k_rx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_rx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			fm10k_get_rx_port_offloads_capa(dev),
-			fm10k_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/* make sure the mempool element size can account for alignment. */
 	if (!mempool_element_size_valid(mp)) {
@@ -1911,7 +1870,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->queue_id = queue_id;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	if (handle_rxconf(q, conf))
 		return -EINVAL;
 
@@ -2040,22 +1999,6 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = fm10k_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_txconf *conf)
@@ -2063,20 +2006,11 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_tx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_tx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			fm10k_get_tx_port_offloads_capa(dev),
-			fm10k_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* make sure a valid number of descriptors have been requested */
 	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
@@ -2115,7 +2049,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->port_id = dev->data->port_id;
 	q->queue_id = queue_id;
 	q->txq_flags = conf->txq_flags;
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	q->ops = &def_txq_ops;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 62985c3..05b4950 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1690,20 +1690,6 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 }
 
 static int
-i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
-	if ((requested & dev_info.rx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_first_queue(uint16_t idx, void **queues, int num)
 {
 	uint16_t i;
@@ -1792,18 +1778,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1857,7 +1834,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/* Allocate the maximun number of RX ring hardware descriptor. */
 	len = I40E_MAX_RING_DESC;
@@ -2075,20 +2052,6 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 static int
-i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
-	if ((requested & dev_info.tx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 				struct i40e_tx_queue *txq)
 {
@@ -2151,18 +2114,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t reg_idx, i, base, bsf, tc_mapping;
 	int q_offset;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			dev_info.tx_offload_capa);
-			return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2297,7 +2251,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->queue_id = queue_idx;
 	txq->reg_idx = reg_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 91179e9..320ab21 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2365,9 +2365,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2379,22 +2376,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	ixgbe_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -4965,29 +4946,10 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	ixgbevf_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2892436..7de6f00 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2448,22 +2448,6 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	return tx_offload_capa;
 }
 
-static int
-ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2475,25 +2459,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct ixgbe_tx_queue *txq;
 	struct ixgbe_hw     *hw;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			ixgbe_get_tx_queue_offloads(dev),
-			ixgbe_get_tx_port_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -2621,7 +2592,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -2915,22 +2886,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	return offloads;
 }
 
-static int
-ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2945,21 +2900,12 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			ixgbe_get_rx_port_offloads(dev),
-			ixgbe_get_rx_queue_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -2994,7 +2940,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 65f0994..35c44ff 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -693,26 +693,6 @@ mlx4_get_rx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = mlx4_get_rx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Rx queue.
  *
  * @param dev
@@ -754,20 +734,13 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	};
 	int ret;
 	uint32_t crc_present;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
-	(void)conf; /* Thresholds configuration (ignored). */
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      (mlx4_get_rx_port_offloads(priv) |
-		       mlx4_get_rx_queue_offloads(priv)));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_rx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -793,7 +766,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		     (void *)dev, idx, desc);
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		crc_present = 0;
 	} else if (priv->hw_fcs_strip) {
 		crc_present = 1;
@@ -825,9 +798,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -840,7 +813,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		uint32_t size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index fe6a8e0..2443333 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -180,26 +180,6 @@ mlx4_get_tx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = mlx4_get_tx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Tx queue.
  *
  * @param dev
@@ -246,23 +226,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		},
 	};
 	int ret;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      mlx4_get_tx_port_offloads(priv));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_tx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -313,11 +283,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
 					   DEV_TX_OFFLOAD_UDP_CKSUM |
 					   DEV_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads &
+			      (offloads &
 			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 746b94f..df369cd 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -330,30 +330,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	unsigned int reta_idx_n;
 	const uint8_t use_app_rss_key =
 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-	uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t supp_rx_offloads =
-		(mlx5_get_rx_port_offloads() |
-		 mlx5_get_rx_queue_offloads(dev));
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret = 0;
 
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Tx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, tx_offloads, supp_tx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
-	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Rx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, rx_offloads, supp_rx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (use_app_rss_key &&
 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
 	     rss_hash_default_key_len)) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 126412d..cea93cf 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -237,32 +237,6 @@ mlx5_get_rx_port_offloads(void)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
-	uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return 0;
-	if (((port_offloads ^ offloads) & port_supp_offloads))
-		return 0;
-	return 1;
-}
-
-/**
  *
  * @param dev
  *   Pointer to Ethernet device structure.
@@ -305,18 +279,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = EOVERFLOW;
 		return -rte_errno;
 	}
-	if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
-		DRV_LOG(ERR,
-			"port %u Rx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(mlx5_get_rx_port_offloads() |
-			 mlx5_get_rx_queue_offloads(dev)));
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (!mlx5_rxq_releasable(dev, idx)) {
 		DRV_LOG(ERR, "port %u unable to release queue index %u",
 			dev->data->port_id, idx);
@@ -980,6 +942,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 */
 	const uint16_t desc_n =
 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+	uint64_t offloads = conf->offloads |
+			   dev->data->dev_conf.rxmode.offloads;
 
 	tmpl = rte_calloc_socket("RXQ", 1,
 				 sizeof(*tmpl) +
@@ -997,7 +961,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		tmpl->rxq.sges_n = 0;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1044,12 +1008,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
-	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		tmpl->rxq.crc_present = 0;
 	} else if (config->hw_fcs_strip) {
 		tmpl->rxq.crc_present = 1;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4435874..fb7b4ad 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -127,31 +127,6 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
-
-	/* There are no Tx offloads which are per queue. */
-	if ((offloads & port_supp_offloads) != offloads)
-		return 0;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return 0;
-	return 1;
-}
-
-/**
  * DPDK callback to configure a TX queue.
  *
  * @param dev
@@ -177,22 +152,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		DRV_LOG(ERR,
-			"port %u Tx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			mlx5_get_tx_port_offloads(dev));
-		return -rte_errno;
-	}
 	if (desc <= MLX5_TX_COMP_THRESH) {
 		DRV_LOG(WARNING,
 			"port %u number of descriptors requested for Tx queue"
@@ -810,7 +769,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl->txq.offloads = conf->offloads;
+	tmpl->txq.offloads = conf->offloads |
+			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 05998bf..c9d85ca 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -318,26 +318,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.split_hdr_size) {
 		RTE_LOG(INFO, PMD, "Split headers not supported\n");
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
-		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
-		return -EINVAL;
-	}
-
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		RTE_LOG(INFO, PMD, "LRO not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 ETHER_HDR_LEN - ETHER_CRC_LEN;
@@ -1522,42 +1507,6 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
 }
 
 /**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = MRVL_RX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the receive queue.
  *
  * @param dev
@@ -1587,9 +1536,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	uint32_t min_size,
 		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	int ret, tc, inq;
+	uint64_t offloads;
 
-	if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
 		/*
@@ -1622,8 +1571,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled =
-		dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1686,42 +1634,6 @@ mrvl_rx_queue_release(void *rxq)
 }
 
 /**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = MRVL_TX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the transmit queue.
  *
  * @param dev
@@ -1746,9 +1658,6 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mrvl_priv *priv = dev->data->dev_private;
 	struct mrvl_txq *txq;
 
-	if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
-
 	if (dev->data->tx_queues[idx]) {
 		rte_free(dev->data->tx_queues[idx]);
 		dev->data->tx_queues[idx] = NULL;
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 048324e..d3b8ec0 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -412,148 +412,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX offloads */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
-		PMD_INIT_LOG(INFO, "rxmode does not support split header");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
-		PMD_INIT_LOG(INFO, "RXCSUM not supported");
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-		PMD_INIT_LOG(INFO, "VLAN filter not supported");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
-		PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		PMD_INIT_LOG(INFO, "LRO not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
-		PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
 	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
 		PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
 
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
-		PMD_INIT_LOG(INFO, "Scatter not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
-		PMD_INIT_LOG(INFO, "timestamp offfload not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "security offload not supported");
-		return -EINVAL;
-	}
-
-	/* checking TX offloads */
-	if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
-		PMD_INIT_LOG(INFO, "vlan insert offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
-		PMD_INIT_LOG(INFO, "TX checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) {
-		PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
-		PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
-		PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
-		PMD_INIT_LOG(INFO, "tunneling offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
-		PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
-		PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
-		PMD_INIT_LOG(INFO, "TX multisegs  not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
-		PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "TX security offload not supported");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -1600,8 +1461,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	const struct rte_memzone *tz;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_rxmode *rxmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1615,17 +1474,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	rxmode = &dev_conf->rxmode;
-
-	if (rx_conf->offloads != rxmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u rx offloads not as port offloads",
-				  queue_idx);
-		PMD_DRV_LOG(ERR, "\tport: %" PRIx64 "", rxmode->offloads);
-		PMD_DRV_LOG(ERR, "\tqueue: %" PRIx64 "", rx_conf->offloads);
-		return -EINVAL;
-	}
-
 	/*
 	 * Free memory prior to re-allocation if needed. This is the case after
 	 * calling nfp_net_stop
@@ -1762,8 +1610,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	struct nfp_net_txq *txq;
 	uint16_t tx_free_thresh;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_txmode *txmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1777,15 +1623,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	txmode = &dev_conf->txmode;
-
-	if (tx_conf->offloads != txmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u tx offloads not as port offloads",
-				  queue_idx);
-		return -EINVAL;
-	}
-
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 				    tx_conf->tx_free_thresh :
 				    DEFAULT_TX_FREE_THRESH);
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 04120f5..4b14b8f 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -262,8 +262,6 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -285,38 +283,14 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	configured_offloads = rxmode->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
+		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	configured_offloads = txmode->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
+		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -738,14 +712,12 @@ octeontx_dev_tx_queue_release(void *tx_queue)
 static int
 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			    uint16_t nb_desc, unsigned int socket_id,
-			    const struct rte_eth_txconf *tx_conf)
+			    const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 	struct octeontx_txq *txq = NULL;
 	uint16_t dq_num;
 	int res = 0;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 	RTE_SET_USED(socket_id);
@@ -766,22 +738,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		dev->data->tx_queues[qidx] = NULL;
 	}
 
-	configured_offloads = tx_conf->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
-		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
 	/* Allocating tx queue data structure */
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
 				 RTE_CACHE_LINE_SIZE, nic->node);
@@ -837,8 +793,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint8_t gaura;
 	unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
 	unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 
@@ -861,22 +815,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	port = nic->port_id;
 
-	configured_offloads = rx_conf->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 	/* Rx deferred start is not supported */
 	if (rx_conf->rx_deferred_start) {
 		octeontx_log_err("rx deferred start not supported");
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index e42d553..fc2b254 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -413,14 +413,16 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
 		     rx_queue_id, nb_rx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
-			  rx_conf, mb_pool);
+			  rx_conf, mb_pool, offloads);
 	if (rc != 0)
 		goto fail_rx_qinit;
 
@@ -469,13 +471,16 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
 		     tx_queue_id, nb_tx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
-	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id,
+			  tx_conf, offloads);
 	if (rc != 0)
 		goto fail_tx_qinit;
 
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 57ed34f..dbdd000 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -830,32 +830,10 @@ sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 	}
 }
 
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
-			     sfc_rx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_rx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
 static int
 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
-		   const struct rte_eth_rxconf *rx_conf)
+		   const struct rte_eth_rxconf *rx_conf,
+		   uint64_t offloads)
 {
 	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
 				      sfc_rx_get_queue_offload_caps(sa);
@@ -880,17 +858,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
 		rc = EINVAL;
 	}
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+	if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
 	    DEV_RX_OFFLOAD_CHECKSUM)
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
 
 	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
 
-	if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -998,7 +973,8 @@ int
 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_rx_desc, unsigned int socket_id,
 	     const struct rte_eth_rxconf *rx_conf,
-	     struct rte_mempool *mb_pool)
+	     struct rte_mempool *mb_pool,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	struct sfc_rss *rss = &sa->rss;
@@ -1020,7 +996,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
 	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
 
-	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -1033,7 +1009,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	}
 
 	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
 			"object size is too small", sw_index);
 		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1056,7 +1032,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags =
-		(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 3fba7d8..2898fe5 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -138,7 +138,8 @@ void sfc_rx_stop(struct sfc_adapter *sa);
 int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
 		 uint16_t nb_rx_desc, unsigned int socket_id,
 		 const struct rte_eth_rxconf *rx_conf,
-		 struct rte_mempool *mb_pool);
+		 struct rte_mempool *mb_pool,
+		 uint64_t offloads);
 void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
 void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 1cd08d8..a4a21fa 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -90,31 +90,9 @@ sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 }
 
 static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
-			     sfc_tx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_tx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
-static int
 sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
-		   const struct rte_eth_txconf *tx_conf)
+		   const struct rte_eth_txconf *tx_conf,
+		   uint64_t offloads)
 {
 	int rc = 0;
 
@@ -138,15 +116,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
 
-	if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -160,7 +135,8 @@ sfc_tx_qflush_done(struct sfc_txq *txq)
 int
 sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
-	     const struct rte_eth_txconf *tx_conf)
+	     const struct rte_eth_txconf *tx_conf,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int txq_entries;
@@ -183,7 +159,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(txq_entries >= nb_tx_desc);
 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
 
-	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -210,7 +186,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
 		SFC_TX_DEFAULT_FREE_THRESH;
 	txq->flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
 			   socket_id, &txq->mem);
@@ -221,7 +197,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	info.max_fill_level = txq_max_fill_level;
 	info.free_thresh = txq->free_thresh;
 	info.flags = tx_conf->txq_flags;
-	info.offloads = tx_conf->offloads;
+	info.offloads = offloads;
 	info.txq_entries = txq_info->entries;
 	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
 	info.txq_hw_ring = txq->mem.esm_base;
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index c2e5f13..d2b2c4d 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -121,7 +121,8 @@ void sfc_tx_close(struct sfc_adapter *sa);
 
 int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		 uint16_t nb_tx_desc, unsigned int socket_id,
-		 const struct rte_eth_txconf *tx_conf);
+		 const struct rte_eth_txconf *tx_conf,
+		 uint64_t offloads);
 void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 
 void sfc_tx_qflush_done(struct sfc_txq *txq);
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 172a7ba..78fe89b 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -280,21 +280,6 @@ tap_rx_offload_get_queue_capa(void)
 	       DEV_RX_OFFLOAD_CRC_STRIP;
 }
 
-static bool
-tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 /* Callback to handle the rx burst of packets to the correct interface and
  * file descriptor(s) in a multi-queue setup.
  */
@@ -408,22 +393,6 @@ tap_tx_offload_get_queue_capa(void)
 	       DEV_TX_OFFLOAD_TCP_CKSUM;
 }
 
-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
 	       unsigned int l3_len)
@@ -668,18 +637,6 @@ tap_dev_stop(struct rte_eth_dev *dev)
 static int
 tap_dev_configure(struct rte_eth_dev *dev)
 {
-	uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa();
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, supp_tx_offloads);
-		return -rte_errno;
-	}
 	if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
 		TAP_LOG(ERR,
 			"%s: number of rx queues %d exceeds max num of queues %d",
@@ -1081,19 +1038,6 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}
 
-	/* Verify application offloads are valid for our port and queue. */
-	if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(tap_rx_offload_get_port_capa() |
-			 tap_rx_offload_get_queue_capa()));
-		return -rte_errno;
-	}
 	rxq->mp = mp;
 	rxq->trigger_seen = 1; /* force initial burst */
 	rxq->in_port = dev->data->port_id;
@@ -1157,35 +1101,19 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 	struct pmd_internals *internals = dev->data->dev_private;
 	struct tx_queue *txq;
 	int ret;
+	uint64_t offloads;
 
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -1;
 	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
 	txq = dev->data->tx_queues[tx_queue_id];
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
-		if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
-			txq->csum = !!(tx_conf->offloads &
-					(DEV_TX_OFFLOAD_IPV4_CKSUM |
-					 DEV_TX_OFFLOAD_UDP_CKSUM |
-					 DEV_TX_OFFLOAD_TCP_CKSUM));
-		} else {
-			rte_errno = ENOTSUP;
-			TAP_LOG(ERR,
-				"%p: Tx queue offloads 0x%" PRIx64
-				" don't match port offloads 0x%" PRIx64
-				" or supported offloads 0x%" PRIx64,
-				(void *)dev, tx_conf->offloads,
-				dev->data->dev_conf.txmode.offloads,
-				(tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa()));
-			return -rte_errno;
-		}
-	}
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->csum = !!(offloads &
+			(DEV_TX_OFFLOAD_IPV4_CKSUM |
+			 DEV_TX_OFFLOAD_UDP_CKSUM |
+			 DEV_TX_OFFLOAD_TCP_CKSUM));
+
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
 		return -1;
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index b673b47..23baa99 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -931,7 +931,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	bool is_single_pool;
 	struct nicvf_txq *txq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -945,17 +945,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-	conf_offloads = tx_conf->offloads;
-	offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	unsupported_offloads = conf_offloads & ~offload_capa;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Tx deferred start is not supported */
 	if (tx_conf->tx_deferred_start) {
 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,9 +996,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	txq->tx_free_thresh = tx_free_thresh;
 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
-	txq->offloads = conf_offloads;
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->offloads = offloads;
 
-	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1269,7 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint16_t rx_free_thresh;
 	struct nicvf_rxq *rxq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1283,24 +1273,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-
-	conf_offloads = rx_conf->offloads;
-
-	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	offload_capa = NICVF_RX_OFFLOAD_CAPA;
-	unsupported_offloads = conf_offloads & ~offload_capa;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Mempool memory must be contiguous, so must be one memory segment*/
 	if (mp->nb_mem_chunks != 1) {
 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1381,10 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_rx_queue_reset(rxq);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
-			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+			rte_mempool_avail_count(mp), rxq->phys, offloads);
 
 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1912,8 +1885,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint8_t cqcount;
-	uint64_t conf_rx_offloads, rx_offload_capa;
-	uint64_t conf_tx_offloads, tx_offload_capa;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1922,32 +1893,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
-	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_tx_offloads, tx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	conf_rx_offloads = rxmode->offloads;
-	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
-	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_rx_offloads, rx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
 		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
 		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a8aa87b..92fab21 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -385,10 +385,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
 			uint16_t nb_desc,
 			unsigned int socket_id __rte_unused,
-			const struct rte_eth_rxconf *rx_conf,
+			const struct rte_eth_rxconf *rx_conf __rte_unused,
 			struct rte_mempool *mp)
 {
-	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
@@ -408,10 +407,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			"Cannot allocate mbufs for rx virtqueue");
 	}
 
-	if ((rx_conf->offloads ^ rxmode->offloads) &
-	    VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS)
-		return -EINVAL;
-
 	dev->data->rx_queues[queue_idx] = rxvq;
 
 	return 0;
@@ -504,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
 	/* cannot use simple rxtx funcs with multisegs or offloads */
-	if (tx_conf->offloads)
+	if (dev->data->dev_conf.txmode.offloads)
 		hw->use_simple_tx = 0;
 
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index c850241..ba932ff 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -393,25 +393,9 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	const struct rte_memzone *mz;
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	size_t size;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & VMXNET3_RX_OFFLOAD_CAP) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested RX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			rx_offloads, (uint64_t)VMXNET3_RX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
-	if ((tx_offloads & VMXNET3_TX_OFFLOAD_CAP) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested TX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			tx_offloads, (uint64_t)VMXNET3_TX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
 		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index f6e2d98..cf85f3d 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1013,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			   uint16_t queue_idx,
 			   uint16_t nb_desc,
 			   unsigned int socket_id,
-			   const struct rte_eth_txconf *tx_conf)
+			   const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	const struct rte_memzone *mz;
@@ -1025,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
-	    ETH_TXQ_FLAGS_NOXSUMSCTP) {
-		PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (txq == NULL) {
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index e560524..523a07b 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offloading must be within its device capabilities */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
+				    "0x%" PRIx64 " doesn't match Rx offloads "
+				    "capabilities 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.rxmode.offloads,
+				    dev_info.rx_offload_capa);
+		return -EINVAL;
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
+				    "0x%" PRIx64 " doesn't match Tx offloads "
+				    "capabilities 0x%" PRIx64 "\n",
+				    port_id,
+				    local_conf.txmode.offloads,
+				    dev_info.tx_offload_capa);
+		return -EINVAL;
+	}
+
 	/* Check that device supports requested rss hash functions. */
 	if ((dev_info.flow_type_rss_offloads |
 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
@@ -1504,6 +1526,39 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d, new "
+				    "added offloads 0x" PRIx64 " must be "
+				    "within pre-queue offload capabilities 0x"
+				    PRIx64 " in %s\n",
+				    port_id,
+				    rx_queue_id,
+				    local_conf.offloads,
+				    dev_info.rx_queue_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1612,6 +1667,39 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d tx_queue_id=%d, new "
+				    "added offloads 0x" PRIx64 " must be "
+				    "within pre-queue offload capabilities 0x"
+				    PRIx64 " in %s\n",
+				    port_id,
+				    tx_queue_id,
+				    local_conf.offloads,
+				    dev_info.tx_queue_offload_capa,
+				    __func__);
+		return -EINVAL;
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 10:05           ` [dpdk-dev] [PATCH v8] " Wei Dai
@ 2018-05-08 10:41             ` Andrew Rybchenko
  2018-05-08 11:02               ` Ferruh Yigit
  2018-05-08 11:37             ` Andrew Rybchenko
                               ` (2 subsequent siblings)
  3 siblings, 1 reply; 60+ messages in thread
From: Andrew Rybchenko @ 2018-05-08 10:41 UTC (permalink / raw)
  To: Wei Dai, ferruh.yigit, thomas, declan.doherty, linville, mw, mk,
	gtzalik, evgenys, ravi1.kumar, shepard.siegel, ed.czeck,
	john.miller, ajit.khaparde, somnath.kotur, jerin.jacob,
	maciej.czekaj, shijith.thotton, ssrinivasan, santosh.shukla,
	rahul.lakkireddy, ohndale, hyonkim, wenzhuo.lu,
	konstantin.ananyev, beilei.xing, qi.z.zhang, xiao.w.wang,
	jingjing.wu, tdu, dima, nsamsono, jianbo.liu, adrien.mazarguil,
	nelio.laranjeiro, yskoh, matan, vido, alejandro.lucero,
	emant.agrawal, shreyansh.jain, hemant.agrawal, harish.patil,
	rasesh.mody, asesh.mody, shahed.shaikh, yongwang,
	maxime.coquelin, mtetsuyah, tiwei.bie, allain.legacy,
	matt.peters, pascal.mazon, bruce.richardson, gaetan.rivet,
	jasvinder.singh, cristian.dumitrescu
  Cc: dev

On 05/08/2018 01:05 PM, Wei Dai wrote:

[...]

> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
> add offloads = [rt]xconf->offloads |
> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> defined in 17.11 to avoid upper application broken due to offload
> API change.
> PMD can use the info that input [rt]xconf->offloads only carry
> the new added per-queue offloads to do some optimization or some
> code change on base of this patch.

It looks like I've missed why PMDs should be updated to
add device offloads to per-queue offloads.
Can't it be done at the ethdev layer? A PMD can still find out
which offloads were enabled at the device level by
checking [rt]xmode->offloads.

> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

[...]

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
  2018-05-05 18:59           ` Shahaf Shuler
  2018-05-07  7:15             ` Dai, Wei
@ 2018-05-08 10:58             ` Ferruh Yigit
  1 sibling, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-08 10:58 UTC (permalink / raw)
  To: Shahaf Shuler, Wei Dai, Thomas Monjalon; +Cc: dev

On 5/5/2018 7:59 PM, Shahaf Shuler wrote:
> Hi Ferruh, Dai,
>> Subject: [dpdk-dev] [PATCH v7] ethdev: check Rx/Tx offloads
>>
>> This patch check if a input requested offloading is valid or not.
>> Any reuqested offloading must be supported in the device capabilities.
>> Any offloading is disabled by default if it is not set in the parameter
>> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and [rt]x_conf-
>>> offloads to rte_eth_[rt]x_queue_setup( ).
>> From application, a pure per-port offloading can't be enabled on any queue if
>> it hasn't been enabled in rte_eth_dev_configure( ).
>> If any offloading is enabled in rte_eth_dev_configure( ) by application, it is
>> enabled on all queues no matter whether it is per-queue or per-port type
>> and no matter whether it is set or cleared in [rt]x_conf->offloads to
>> rte_eth_[rt]x_queue_setup( ).
>> The underlying PMD must be aware that the requested offloadings to PMD
>> specific queue_setup( ) function only carries those offloadings only enabled
>> for the queue but not enabled in rte_eth_dev_configure( ) and they are
>> certain per-queue type.
>>
>> This patch can make above such checking in a common way in rte_ethdev
>> layer to avoid same checking in underlying PMD.
>>
>> Signed-off-by: Wei Dai <wei.dai@intel.com>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>>
>> ---
>> v7:
>> Give the maximum freedom for upper application, only minimal checking is
>> performed in ethdev layer.
>> Only requested specific pure per-queue offloadings are input to underlying
>> PMD.
>>
>> v6:
>> No need enable an offload in queue_setup( ) if it has already been enabled
>> in dev_configure( )
>>
>> v5:
>> keep offload settings sent to PMD same as those from application
>>
>> v4:
>> fix a wrong description in git log message.
>>
>> v3:
>> rework according to dicision of offloading API in community
>>
>> v2:
>> add offloads checking in rte_eth_dev_configure( ).
>> check if a requested offloading is supported.
>> ---
>>  lib/librte_ethdev/rte_ethdev.c | 150
>> +++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 150 insertions(+)
>>
>> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
>> index e560524..0ad05eb 100644
>> --- a/lib/librte_ethdev/rte_ethdev.c
>> +++ b/lib/librte_ethdev/rte_ethdev.c
>> @@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t
>> nb_rx_q, uint16_t nb_tx_q,
>>  							ETHER_MAX_LEN;
>>  	}
>>
>> +	/* Any requested offloading must be within its device capabilities */
>> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
>> +	     local_conf.rxmode.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx
>> offloads "
>> +				    "0x%" PRIx64 " doesn't match Rx offloads "
>> +				    "capabilities 0x%" PRIx64 "\n",
>> +				    port_id,
>> +				    local_conf.rxmode.offloads,
>> +				    dev_info.rx_offload_capa);
>> +		return -EINVAL;
> 
> While I am OK with such behavior, we should be more careful not to get into the same issue as in [1].
> There are PMD which don't report the capabilities correctly however do expect to have the offload configured.
> 
> All I am saying it is worth a check and cautious decision if it is right to include this one w/o prior application notice and at such late RC of the release. 

This is valid concern. I think this is better than [1] which was less clear than
this check but yes still a concern.

> 
>> +	}
>> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
>> +	     local_conf.txmode.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx
>> offloads "
>> +				    "0x%" PRIx64 " doesn't match Tx offloads "
>> +				    "capabilities 0x%" PRIx64 "\n",
>> +				    port_id,
>> +				    local_conf.txmode.offloads,
>> +				    dev_info.tx_offload_capa);
>> +		return -EINVAL;
>> +	}
>> +
>>  	/* Check that device supports requested rss hash functions. */
>>  	if ((dev_info.flow_type_rss_offloads |
>>  	     dev_conf->rx_adv_conf.rss_conf.rss_hf) != @@ -1414,6 +1436,8
>> @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>>  	struct rte_eth_dev_info dev_info;
>>  	struct rte_eth_rxconf local_conf;
>>  	void **rxq;
>> +	uint64_t pure_port_offload_capa;
>> +	uint64_t only_enabled_for_queue;
>>
>>  	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
>>
>> @@ -1504,6 +1528,68 @@ rte_eth_rx_queue_setup(uint16_t port_id,
>> uint16_t rx_queue_id,
>>  						    &local_conf.offloads);
>>  	}
>>
>> +	/*
>> +	 * The requested offloadings by application for this queue
>> +	 * can be per-queue type or per-port type. and
>> +	 * they must be within the device offloading capabilities.
>> +	 */
>> +	if ((local_conf.offloads & dev_info.rx_offload_capa) !=
>> +	     local_conf.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
>> rx_queue_id=%d "
>> +				    "Requested offload 0x%" PRIx64 "doesn't "
>> +				    "match per-queue capability 0x%" PRIx64
>> +				    " in %s\n",
>> +				    port_id,
>> +				    rx_queue_id,
>> +				    local_conf.offloads,
>> +				    dev_info.rx_queue_offload_capa,
>> +				    __func__);
>> +		return -EINVAL;
>> +	}
>> +
>> +	/*
>> +	 * A pure per-port offloading can't be enabled for any queue
>> +	 * if it hasn't been enabled in rte_eth_dev_configure( ).
>> +	 *
>> +	 * Following pure_port_offload_capa is the capabilities which
>> +	 * can't be enabled on some queue while disabled on other queue.
>> +	 * pure_port_offload_capa must be enabled or disabled on all
>> +	 * queues at same time.
>> +	 *
>> +	 * Following only_enabled_for_queue is the offloadings which
>> +	 * are enabled for this queue but hasn't been enabled in
>> +	 * rte_eth_dev_configure( ).
>> +	 */
>> +	pure_port_offload_capa = dev_info.rx_offload_capa ^
>> +				 dev_info.rx_queue_offload_capa;
>> +	only_enabled_for_queue = (local_conf.offloads ^
>> +		dev->data->dev_conf.rxmode.offloads) &
>> local_conf.offloads;
> 
> It looks like above logic could be a lot simpler. 
> 
> How about:
> local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; // keep only the added offloads on top of the port ones
> if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
>     local_conf.offloads) { //check if added offloads are part of the queue offload capa
> 	ERROR...

+1

> 
> 
>> +	if (only_enabled_for_queue & pure_port_offload_capa) {
>> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
>> rx_queue_id=%d, only "
>> +				    "enabled offload 0x%" PRIx64 "for this "
>> +				    "queue haven't been enabled in "
>> +				    "dev_configure( ), they are within "
>> +				    "pure per-port capabilities 0x%" PRIx64
> 
> Need to re-work this error message. The user doesn't know what are "pure per-port capabilities" 

+1

> 
>> +				    " in %s\n",
>> +				    port_id,
>> +				    rx_queue_id,
>> +				    only_enabled_for_queue,
>> +				    pure_port_offload_capa,
>> +				    __func__);
>> +		return -EINVAL;
>> +	}
>> +
>> +	/*
>> +	 * If an offloading has already been enabled in
>> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
>> +	 * so there is no need to enable it in this queue again.
>> +	 * The local_conf.offloads input to underlying PMD only carries
>> +	 * those offloadings which are only enabled on this queue and
>> +	 * not enabled on all queues.
>> +	 * The underlying PMD must be aware of this point.
>> +	 */
>> +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
>> +
>>  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id,
>> nb_rx_desc,
>>  					      socket_id, &local_conf, mp);
>>  	if (!ret) {
>> @@ -1549,6 +1635,8 @@ rte_eth_tx_queue_setup(uint16_t port_id,
>> uint16_t tx_queue_id,
>>  	struct rte_eth_dev_info dev_info;
>>  	struct rte_eth_txconf local_conf;
>>  	void **txq;
>> +	uint64_t pure_port_offload_capa;
>> +	uint64_t only_enabled_for_queue;
>>
>>  	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
>>
>> @@ -1612,6 +1700,68 @@ rte_eth_tx_queue_setup(uint16_t port_id,
>> uint16_t tx_queue_id,
>>  					  &local_conf.offloads);
>>  	}
>>
>> +	/*
>> +	 * The requested offloadings by application for this queue
>> +	 * can be per-queue type or per-port type. and
>> +	 * they must be within the device offloading capabilities.
>> +	 */
>> +	if ((local_conf.offloads & dev_info.tx_offload_capa) !=
>> +	     local_conf.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
>> tx_queue_id=%d "
>> +				    "Requested offload 0x%" PRIx64 "doesn't "
>> +				    "match per-queue capability 0x%" PRIx64
>> +				    " in %s\n",
>> +				    port_id,
>> +				    tx_queue_id,
>> +				    local_conf.offloads,
>> +				    dev_info.tx_queue_offload_capa,
>> +				    __func__);
>> +		return -EINVAL;
>> +	}
>> +
>> +	/*
>> +	 * A pure per-port offloading can't be enabled for any queue
>> +	 * if it hasn't been enabled in rte_eth_dev_configure( ).
>> +	 *
>> +	 * Following pure_port_offload_capa is the capabilities which
>> +	 * can't be enabled on some queue while disabled on other queue.
>> +	 * pure_port_offload_capa must be enabled or disabled on all
>> +	 * queues at same time.
>> +	 *
>> +	 * Following only_enabled_for_queue is the offloadings which
>> +	 * are enabled for this queue but hasn't been enabled in
>> +	 * rte_eth_dev_configure( ).
>> +	 */
>> +	pure_port_offload_capa = dev_info.tx_offload_capa ^
>> +				 dev_info.tx_queue_offload_capa;
>> +	only_enabled_for_queue = (local_conf.offloads ^
>> +		dev->data->dev_conf.txmode.offloads) &
>> local_conf.offloads;
> 
> Same comments as in the Rx part.  
> 
>> +	if (only_enabled_for_queue & pure_port_offload_capa) {
>> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d
>> tx_queue_id=%d, only "
>> +				    "enabled offload 0x%" PRIx64 "for this "
>> +				    "queue haven't been enabled in "
>> +				    "dev_configure( ), they are within "
>> +				    "pure per-port capabilities 0x%" PRIx64
>> +				    " in %s\n",
>> +				    port_id,
>> +				    tx_queue_id,
>> +				    only_enabled_for_queue,
>> +				    pure_port_offload_capa,
>> +				    __func__);
>> +		return -EINVAL;
>> +	}
>> +
>> +	/*
>> +	 * If an offloading has already been enabled in
>> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
>> +	 * so there is no need to enable it in this queue again.
>> +	 * The local_conf.offloads input to underlying PMD only carries
>> +	 * those offloadings which are only enabled on this queue and
>> +	 * not enabled on all queues.
>> +	 * The underlying PMD must be aware of this point.
>> +	 */
>> +	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
>> +
>>  	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
>>  		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));  }
>> --
>> 2.7.5
> 
> 
> As for Ferruh's comment
>>
>> PMDs needs to be updated for:
>> 1- Remove existing offload verify checks
>> 2- Update offload configure logic based on new values
>>
>> (1) can be part of this patch. But PMD maintainers should send update 
>> for (2) if a change required.
>>
>> cc'ed Shahaf, specially for (2) one.
> 
> I think PMD maintainers can help with that. If it will be integrated enough time before the release Mellanox PMDs can be converted by us. 
> 

Thanks.
As far as I can see in v8 Wei is adding some code [2] to keep same input for the
PMD to not break the logic in PMD. But later PMD can be updated for better
support of new offload input to the PMD.

[2]
  +	uint64_t offloads = conf->offloads |
  +			   dev->data->dev_conf.rxmode.offloads;

> 
> 
> 
> [1]
> http://dpdk.org/dev/patchwork/patch/38645/
> 
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 10:41             ` Andrew Rybchenko
@ 2018-05-08 11:02               ` Ferruh Yigit
  2018-05-08 11:22                 ` Andrew Rybchenko
  0 siblings, 1 reply; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-08 11:02 UTC (permalink / raw)
  To: Andrew Rybchenko, Wei Dai, thomas, declan.doherty, linville, mw,
	mk, gtzalik, evgenys, ravi1.kumar, shepard.siegel, ed.czeck,
	john.miller, ajit.khaparde, somnath.kotur, jerin.jacob,
	maciej.czekaj, shijith.thotton, ssrinivasan, santosh.shukla,
	rahul.lakkireddy, ohndale, hyonkim, wenzhuo.lu,
	konstantin.ananyev, beilei.xing, qi.z.zhang, xiao.w.wang,
	jingjing.wu, tdu, dima, nsamsono, jianbo.liu, adrien.mazarguil,
	nelio.laranjeiro, yskoh, matan, vido, alejandro.lucero,
	emant.agrawal, shreyansh.jain, hemant.agrawal, harish.patil,
	rasesh.mody, asesh.mody, shahed.shaikh, yongwang,
	maxime.coquelin, mtetsuyah, tiwei.bie, allain.legacy,
	matt.peters, pascal.mazon, bruce.richardson, gaetan.rivet,
	jasvinder.singh, cristian.dumitrescu
  Cc: dev

On 5/8/2018 11:41 AM, Andrew Rybchenko wrote:
> On 05/08/2018 01:05 PM, Wei Dai wrote:
> 
> [...]
> 
>> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
>> add offloads = [rt]xconf->offloads |
>> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
>> defined in 17.11 to avoid upper application broken due to offload
>> API change.
>> PMD can use the info that input [rt]xconf->offloads only carry
>> the new added per-queue offloads to do some optimization or some
>> code change on base of this patch.
> 
> It looks like I've missed why PMDs should be updated to
> add device offloads to per-queue offloads.
> Cannot it be done on ethdev layer? PMD still can find out
> which offloads were enabled on device level by
> checking [rt]xmode->offlaods.

The stripping [rt]xconf->offloads part added into ethdev layer. This changed the
input set to the PMDs.

Above code was added to keep the input same for the PMDs. Expectation is that later the PMD
updates its code to use the new input and removes this update.

> 
>> Signed-off-by: Wei Dai <wei.dai@intel.com>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> 
> [...]
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 11:02               ` Ferruh Yigit
@ 2018-05-08 11:22                 ` Andrew Rybchenko
  0 siblings, 0 replies; 60+ messages in thread
From: Andrew Rybchenko @ 2018-05-08 11:22 UTC (permalink / raw)
  To: Ferruh Yigit, Wei Dai, thomas, declan.doherty, linville, mw, mk,
	gtzalik, evgenys, ravi1.kumar, shepard.siegel, ed.czeck,
	john.miller, ajit.khaparde, somnath.kotur, jerin.jacob,
	maciej.czekaj, shijith.thotton, ssrinivasan, santosh.shukla,
	rahul.lakkireddy, ohndale, hyonkim, wenzhuo.lu,
	konstantin.ananyev, beilei.xing, qi.z.zhang, xiao.w.wang,
	jingjing.wu, tdu, dima, nsamsono, jianbo.liu, adrien.mazarguil,
	nelio.laranjeiro, yskoh, matan, vido, alejandro.lucero,
	emant.agrawal, shreyansh.jain, hemant.agrawal, harish.patil,
	rasesh.mody, asesh.mody, shahed.shaikh, yongwang,
	maxime.coquelin, mtetsuyah, tiwei.bie, allain.legacy,
	matt.peters, pascal.mazon, bruce.richardson, gaetan.rivet,
	jasvinder.singh, cristian.dumitrescu
  Cc: dev

On 05/08/2018 02:02 PM, Ferruh Yigit wrote:
> On 5/8/2018 11:41 AM, Andrew Rybchenko wrote:
>> On 05/08/2018 01:05 PM, Wei Dai wrote:
>>
>> [...]
>>
>>> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
>>> add offloads = [rt]xconf->offloads |
>>> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
>>> defined in 17.11 to avoid upper application broken due to offload
>>> API change.
>>> PMD can use the info that input [rt]xconf->offloads only carry
>>> the new added per-queue offloads to do some optimization or some
>>> code change on base of this patch.
>> It looks like I've missed why PMDs should be updated to
>> add device offloads to per-queue offloads.
>> Cannot it be done on ethdev layer? PMD still can find out
>> which offloads were enabled on device level by
>> checking [rt]xmode->offlaods.
> The stripping [rt]xconf->offloads part added into ethdev layer. This changed the
> input set to the PMDs.
>
> Above coded added to keep the input same for the PMDs. Expectation is later PMD
> update its code to use new input and remove this update.

Thanks Ferruh. I read code too fast and lost it from my view.
I'll provide more review notes related to net/sfc.

>>> Signed-off-by: Wei Dai <wei.dai@intel.com>
>>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
>> [...]

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 10:05           ` [dpdk-dev] [PATCH v8] " Wei Dai
  2018-05-08 10:41             ` Andrew Rybchenko
@ 2018-05-08 11:37             ` Andrew Rybchenko
  2018-05-08 12:34               ` Dai, Wei
  2018-05-08 12:12             ` Ferruh Yigit
  2018-05-10  0:49             ` [dpdk-dev] [PATCH v9] ethdev: new Rx/Tx offloads API Wei Dai
  3 siblings, 1 reply; 60+ messages in thread
From: Andrew Rybchenko @ 2018-05-08 11:37 UTC (permalink / raw)
  To: Wei Dai, ferruh.yigit, thomas, declan.doherty, linville, mw, mk,
	gtzalik, evgenys, ravi1.kumar, shepard.siegel, ed.czeck,
	john.miller, ajit.khaparde, somnath.kotur, jerin.jacob,
	maciej.czekaj, shijith.thotton, ssrinivasan, santosh.shukla,
	rahul.lakkireddy, ohndale, hyonkim, wenzhuo.lu,
	konstantin.ananyev, beilei.xing, qi.z.zhang, xiao.w.wang,
	jingjing.wu, tdu, dima, nsamsono, jianbo.liu, adrien.mazarguil,
	nelio.laranjeiro, yskoh, matan, vido, alejandro.lucero,
	emant.agrawal, shreyansh.jain, hemant.agrawal, harish.patil,
	rasesh.mody, asesh.mody, shahed.shaikh, yongwang,
	maxime.coquelin, mtetsuyah, tiwei.bie, allain.legacy,
	matt.peters, pascal.mazon, bruce.richardson, gaetan.rivet,
	jasvinder.singh, cristian.dumitrescu
  Cc: dev, Ivan Malov

On 05/08/2018 01:05 PM, Wei Dai wrote:
> This patch check if a input requested offloading is valid or not.
> Any reuqested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If any offloading is enabled in rte_eth_dev_configure( ) by application,
> it is enabled on all queues no matter whether it is per-queue or
> per-port type and no matter whether it is set or cleared in
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If a per-queue offloading hasn't be enabled in rte_eth_dev_configure( ),
> it can be enabled or disabled for individual queue in
> ret_eth_[rt]x_queue_setup( ).
> A new added offloading is the one which hasn't been enabled in
> rte_eth_dev_configure( ) and is reuqested to be enabled in
> rte_eth_[rt]x_queue_setup( ), it must be per-queue type,
> otherwise return error.
> The underlying PMD must be aware that the requested offloadings
> to PMD specific queue_setup( ) function only carries those
> new added offloadings of per-queue type.
>
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
>
> This patch assumes that all PMDs in 18.05-rc2 have already
> converted to offload API defined in 17.11 . It also assumes
> that all PMDs can return correct offloading capabilities
> in rte_eth_dev_infos_get( ).
>
> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
> add offloads = [rt]xconf->offloads |
> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> defined in 17.11 to avoid upper application broken due to offload
> API change.
> PMD can use the info that input [rt]xconf->offloads only carry
> the new added per-queue offloads to do some optimization or some
> code change on base of this patch.
>
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

[...]

> diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
> index e42d553..fc2b254 100644
> --- a/drivers/net/sfc/sfc_ethdev.c
> +++ b/drivers/net/sfc/sfc_ethdev.c
> @@ -413,14 +413,16 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
>   {
>   	struct sfc_adapter *sa = dev->data->dev_private;
>   	int rc;
> +	uint64_t offloads;
>   
>   	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
>   		     rx_queue_id, nb_rx_desc, socket_id);
>   
>   	sfc_adapter_lock(sa);
>   
> +	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;

I'd prefer to see it inside sfc_rx_qinit() function. It would allow to avoid
sfc_rx_qinit() function prototype changes.

>   	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
> -			  rx_conf, mb_pool);
> +			  rx_conf, mb_pool, offloads);
>   	if (rc != 0)
>   		goto fail_rx_qinit;
>   
> @@ -469,13 +471,16 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
>   {
>   	struct sfc_adapter *sa = dev->data->dev_private;
>   	int rc;
> +	uint64_t offloads;
>   
>   	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
>   		     tx_queue_id, nb_tx_desc, socket_id);
>   
>   	sfc_adapter_lock(sa);
>   
> -	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
> +	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;

Same as above.

> +	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id,
> +			  tx_conf, offloads);
>   	if (rc != 0)
>   		goto fail_tx_qinit;

[...]

It looks like device level offloads are checked on ethdev layer now.
So, I think check in sfc_rx_check_mode() and sfc_tx_check_mode()
may be removed as well (see offloads_rejected).
I think it will make functions sfc_rx_log_offloads() and
sfc_tx_log_offloads() unused and these functions should be removed.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 10:05           ` [dpdk-dev] [PATCH v8] " Wei Dai
  2018-05-08 10:41             ` Andrew Rybchenko
  2018-05-08 11:37             ` Andrew Rybchenko
@ 2018-05-08 12:12             ` Ferruh Yigit
  2018-05-09 12:45               ` Dai, Wei
  2018-05-10  0:49             ` [dpdk-dev] [PATCH v9] ethdev: new Rx/Tx offloads API Wei Dai
  3 siblings, 1 reply; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-08 12:12 UTC (permalink / raw)
  To: Wei Dai, thomas, declan.doherty, linville, mw, mk, gtzalik,
	evgenys, ravi1.kumar, shepard.siegel, ed.czeck, john.miller,
	ajit.khaparde, somnath.kotur, jerin.jacob, maciej.czekaj,
	shijith.thotton, ssrinivasan, santosh.shukla, rahul.lakkireddy,
	ohndale, hyonkim, wenzhuo.lu, konstantin.ananyev, beilei.xing,
	qi.z.zhang, xiao.w.wang, jingjing.wu, tdu, dima, nsamsono,
	jianbo.liu, adrien.mazarguil, nelio.laranjeiro, yskoh, matan,
	vido, alejandro.lucero, emant.agrawal, shreyansh.jain,
	hemant.agrawal, harish.patil, rasesh.mody, asesh.mody,
	shahed.shaikh, arybchenko, yongwang, maxime.coquelin, mtetsuyah,
	tiwei.bie, allain.legacy, matt.peters, pascal.mazon,
	bruce.richardson, gaetan.rivet, jasvinder.singh,
	cristian.dumitrescu
  Cc: dev

On 5/8/2018 11:05 AM, Wei Dai wrote:
> This patch check if a input requested offloading is valid or not.
> Any reuqested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If any offloading is enabled in rte_eth_dev_configure( ) by application,
> it is enabled on all queues no matter whether it is per-queue or
> per-port type and no matter whether it is set or cleared in
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If a per-queue offloading hasn't be enabled in rte_eth_dev_configure( ),
> it can be enabled or disabled for individual queue in
> ret_eth_[rt]x_queue_setup( ).
> A new added offloading is the one which hasn't been enabled in
> rte_eth_dev_configure( ) and is reuqested to be enabled in
> rte_eth_[rt]x_queue_setup( ), it must be per-queue type,
> otherwise return error.
> The underlying PMD must be aware that the requested offloadings
> to PMD specific queue_setup( ) function only carries those
> new added offloadings of per-queue type.
> 
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
> 
> This patch assumes that all PMDs in 18.05-rc2 have already
> converted to offload API defined in 17.11 . It also assumes
> that all PMDs can return correct offloading capabilities
> in rte_eth_dev_infos_get( ).
> 
> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
> add offloads = [rt]xconf->offloads |
> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> defined in 17.11 to avoid upper application broken due to offload
> API change.
> PMD can use the info that input [rt]xconf->offloads only carry
> the new added per-queue offloads to do some optimization or some
> code change on base of this patch.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> 
> ---
> v8:
> Revise PMD codes to comply with offload API in v7
> update document
> 
> v7:
> Give the maximum freedom for upper application,
> only minimal checking is performed in ethdev layer.
> Only requested specific pure per-queue offloadings are input
> to underlying PMD.
> 
> v6:
> No need enable an offload in queue_setup( ) if it has already
> been enabled in dev_configure( )
> 
> v5:
> keep offload settings sent to PMD same as those from application
> 
> v4:
> fix a wrong description in git log message.
> 
> v3:
> rework according to dicision of offloading API in community
> 
> v2:
> add offloads checking in rte_eth_dev_configure( ).
> check if a requested offloading is supported.
> ---
>  doc/guides/prog_guide/poll_mode_drv.rst |  26 +++--
>  doc/guides/rel_notes/release_18_05.rst  |   8 ++
>  drivers/net/avf/avf_rxtx.c              |   5 +-
>  drivers/net/bnxt/bnxt_ethdev.c          |  17 ----
>  drivers/net/cxgbe/cxgbe_ethdev.c        |  50 +---------
>  drivers/net/dpaa/dpaa_ethdev.c          |  16 ----
>  drivers/net/dpaa2/dpaa2_ethdev.c        |  16 ----
>  drivers/net/e1000/em_ethdev.c           |  19 ----
>  drivers/net/e1000/em_rxtx.c             |  64 ++-----------
>  drivers/net/e1000/igb_rxtx.c            |  64 ++-----------
>  drivers/net/ena/ena_ethdev.c            |  65 +------------
>  drivers/net/failsafe/failsafe_ops.c     |  81 ----------------
>  drivers/net/fm10k/fm10k_ethdev.c        |  82 ++--------------
>  drivers/net/i40e/i40e_rxtx.c            |  58 ++----------
>  drivers/net/ixgbe/ixgbe_ethdev.c        |  38 --------
>  drivers/net/ixgbe/ixgbe_rxtx.c          |  66 ++-----------
>  drivers/net/mlx4/mlx4_rxq.c             |  43 ++-------
>  drivers/net/mlx4/mlx4_txq.c             |  42 ++------
>  drivers/net/mlx5/mlx5_ethdev.c          |  22 -----
>  drivers/net/mlx5/mlx5_rxq.c             |  50 ++--------
>  drivers/net/mlx5/mlx5_txq.c             |  44 +--------
>  drivers/net/mvpp2/mrvl_ethdev.c         |  97 +------------------
>  drivers/net/nfp/nfp_net.c               | 163 --------------------------------
>  drivers/net/octeontx/octeontx_ethdev.c  |  72 +-------------
>  drivers/net/sfc/sfc_ethdev.c            |   9 +-
>  drivers/net/sfc/sfc_rx.c                |  42 ++------
>  drivers/net/sfc/sfc_rx.h                |   3 +-
>  drivers/net/sfc/sfc_tx.c                |  42 ++------
>  drivers/net/sfc/sfc_tx.h                |   3 +-
>  drivers/net/tap/rte_eth_tap.c           |  88 ++---------------
>  drivers/net/thunderx/nicvf_ethdev.c     |  70 ++------------
>  drivers/net/virtio/virtio_rxtx.c        |   9 +-
>  drivers/net/vmxnet3/vmxnet3_ethdev.c    |  16 ----
>  drivers/net/vmxnet3/vmxnet3_rxtx.c      |   8 +-
>  lib/librte_ethdev/rte_ethdev.c          |  88 +++++++++++++++++
>  35 files changed, 240 insertions(+), 1346 deletions(-)

Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>


Hi Wei,

Thanks for this patch. Lets wait one more day for PMD owners to test the patch,
if there is no objection patch targets rc3 which is a few days away.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 11:37             ` Andrew Rybchenko
@ 2018-05-08 12:34               ` Dai, Wei
  0 siblings, 0 replies; 60+ messages in thread
From: Dai, Wei @ 2018-05-08 12:34 UTC (permalink / raw)
  To: Andrew Rybchenko, Yigit, Ferruh, thomas, Doherty, Declan,
	linville, mw, mk, gtzalik, evgenys, ravi1.kumar, shepard.siegel,
	ed.czeck, john.miller, ajit.khaparde, somnath.kotur, jerin.jacob,
	maciej.czekaj, shijith.thotton, ssrinivasan, santosh.shukla,
	rahul.lakkireddy, ohndale, hyonkim, Lu, Wenzhuo, Ananyev,
	Konstantin, Xing, Beilei, Zhang, Qi Z, Wang, Xiao W, Wu,
	Jingjing, tdu, dima, nsamsono, jianbo.liu, adrien.mazarguil,
	nelio.laranjeiro, yskoh, matan, vido, alejandro.lucero,
	emant.agrawal, shreyansh.jain, hemant.agrawal, harish.patil,
	rasesh.mody, asesh.mody, shahed.shaikh, yongwang,
	maxime.coquelin, mtetsuyah, Bie, Tiwei, Legacy,
	Allain (Wind River), Peters, Matt (Wind River),
	pascal.mazon, Richardson, Bruce, gaetan.rivet, Singh, Jasvinder,
	Dumitrescu, Cristian
  Cc: dev, Ivan Malov

Hi, Andrew
See my in-line comments as below.

From: Andrew Rybchenko [mailto:arybchenko@solarflare.com] 
Sent: Tuesday, May 8, 2018 7:37 PM
To: Dai, Wei <wei.dai@intel.com>; Yigit, Ferruh <ferruh.yigit@intel.com>; thomas@monjalon.net; Doherty, Declan <declan.doherty@intel.com>; linville@tuxdriver.com; mw@semihalf.com; mk@semihalf.com; gtzalik@amazon.com; evgenys@amazon.com; ravi1.kumar@amd.com; shepard.siegel@atomicrules.com; ed.czeck@atomicrules.com; john.miller@atomicrules.com; ajit.khaparde@broadcom.com; somnath.kotur@broadcom.com; jerin.jacob@caviumnetworks.com; maciej.czekaj@caviumnetworks.com; shijith.thotton@cavium.com; ssrinivasan@cavium.com; santosh.shukla@caviumnetworks.com; rahul.lakkireddy@chelsio.com; ohndale@cisco.com; hyonkim@cisco.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; tdu@semihalf.com; dima@marvell.com; nsamsono@marvell.com; jianbo.liu@arm.com; adrien.mazarguil@6wind.com; nelio.laranjeiro@6wind.com; yskoh@mellanox.com; matan@mellanox.com; vido@cesnet.cz; alejandro.lucero@netronome.com; emant.agrawal@nxp.com; shreyansh.jain@nxp.com; hemant.agrawal@nxp.com; harish.patil@cavium.com; rasesh.mody@cavium.com; asesh.mody@cavium.com; shahed.shaikh@cavium.com; yongwang@vmware.com; maxime.coquelin@redhat.com; mtetsuyah@gmail.com; Bie, Tiwei <tiwei.bie@intel.com>; Legacy, Allain (Wind River) <allain.legacy@windriver.com>; Peters, Matt (Wind River) <matt.peters@windriver.com>; pascal.mazon@6wind.com; Richardson, Bruce <bruce.richardson@intel.com>; gaetan.rivet@6wind.com; Singh, Jasvinder <jasvinder.singh@intel.com>; Dumitrescu, Cristian <cristian.dumitrescu@intel.com>
Cc: dev@dpdk.org; Ivan Malov <Ivan.Malov@oktetlabs.ru>
Subject: Re: [PATCH v8] ethdev: check Rx/Tx offloads

On 05/08/2018 01:05 PM, Wei Dai wrote:
This patch check if a input requested offloading is valid or not.
Any reuqested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If any offloading is enabled in rte_eth_dev_configure( ) by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If a per-queue offloading hasn't be enabled in rte_eth_dev_configure( ),
it can be enabled or disabled for individual queue in
ret_eth_[rt]x_queue_setup( ).
A new added offloading is the one which hasn't been enabled in
rte_eth_dev_configure( ) and is reuqested to be enabled in
rte_eth_[rt]x_queue_setup( ), it must be per-queue type,
otherwise return error.
The underlying PMD must be aware that the requested offloadings
to PMD specific queue_setup( ) function only carries those
new added offloadings of per-queue type.

This patch can make above such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

This patch assumes that all PMDs in 18.05-rc2 have already
converted to offload API defined in 17.11 . It also assumes
that all PMDs can return correct offloading capabilities
in rte_eth_dev_infos_get( ).

In the beginning of [rt]x_queue_setup( ) of underlying PMD,
add offloads = [rt]xconf->offloads |
dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
defined in 17.11 to avoid upper application broken due to offload
API change.
PMD can use the info that input [rt]xconf->offloads only carry
the new added per-queue offloads to do some optimization or some
code change on base of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

[...]


diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index e42d553..fc2b254 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -413,14 +413,16 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
 		     rx_queue_id, nb_rx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;

I'd prefer to see it inside sfc_rx_qinit() function. It would allow to avoid
sfc_rx_qinit() function prototype changes.
[Wei: As rx_conf is a const argument in sfc_rx_queue_setup( ), rx_conf->offloads can't be updated.
  If the sfc_rx_qinit( ) function prototype is kept unchanged, the dev->data can be deduced from the
 1st argument sa. Andrew, if my code works well, can it be kept here? You can change it later
 in your separate patch, OK? ]

 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
-			  rx_conf, mb_pool);
+			  rx_conf, mb_pool, offloads);
 	if (rc != 0)
 		goto fail_rx_qinit;
 
@@ -469,13 +471,16 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
 		     tx_queue_id, nb_tx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
-	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;

Same as above.


+	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id,
+			  tx_conf, offloads);
 	if (rc != 0)
 		goto fail_tx_qinit;

[...]

It looks like device level offloads are checked on ethdev layer now.
So, I think check in sfc_rx_check_mode () and sfc_tx_check_mode()
may be removed as well (see offloads_rejected).
I think it will make functions sfc_rx_log_offloads() and
sfc_tx_log_offloads() unused and these functions should be removed.
[Wei: sorry, I missed the sfc_[rt]x_check_mode( ). Yes, the offloads_rejected checking in this function can be removed.
 But as this patch is a big one, I'd like to keep it as it is if it works well. And I'd like you to remove such checking or 
do other changes in your own patch]
[Wei: by the way, your mail is in HTML format; I am afraid it will be missing from the mail archives.]

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 10:10           ` [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads Wei Dai
@ 2018-05-08 17:51             ` Andrew Rybchenko
  2018-05-09  2:10               ` Dai, Wei
  2018-05-09 14:11               ` Ferruh Yigit
  0 siblings, 2 replies; 60+ messages in thread
From: Andrew Rybchenko @ 2018-05-08 17:51 UTC (permalink / raw)
  To: Wei Dai, ferruh.yigit, thomas, shahafs, qi.z.zhang; +Cc: dev

On 05/08/2018 01:10 PM, Wei Dai wrote:
> This patch check if a input requested offloading is valid or not.
> Any reuqested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If any offloading is enabled in rte_eth_dev_configure( ) by application,
> it is enabled on all queues no matter whether it is per-queue or
> per-port type and no matter whether it is set or cleared in
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If a per-queue offloading hasn't be enabled in rte_eth_dev_configure( ),
> it can be enabled or disabled for individual queue in
> ret_eth_[rt]x_queue_setup( ).
> A new added offloading is the one which hasn't been enabled in
> rte_eth_dev_configure( ) and is reuqested to be enabled in
> rte_eth_[rt]x_queue_setup( ), it must be per-queue type,
> otherwise return error.
> The underlying PMD must be aware that the requested offloadings
> to PMD specific queue_setup( ) function only carries those
> new added offloadings of per-queue type.
>
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
>
> This patch assumes that all PMDs in 18.05-rc2 have already
> converted to offload API defined in 17.11 . It also assumes
> that all PMDs can return correct offloading capabilities
> in rte_eth_dev_infos_get( ).
>
> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
> add offloads = [rt]xconf->offloads |
> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> defined in 17.11 to avoid upper application broken due to offload
> API change.
> PMD can use the info that input [rt]xconf->offloads only carry
> the new added per-queue offloads to do some optimization or some
> code change on base of this patch.
>
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

[...]

> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
> index e560524..523a07b 100644
> --- a/lib/librte_ethdev/rte_ethdev.c
> +++ b/lib/librte_ethdev/rte_ethdev.c
> @@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   							ETHER_MAX_LEN;
>   	}
>   
> +	/* Any requested offloading must be within its device capabilities */
> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> +	     local_conf.rxmode.offloads) {
> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
> +				    "0x%" PRIx64 " doesn't match Rx offloads "
> +				    "capabilities 0x%" PRIx64 "\n",
> +				    port_id,
> +				    local_conf.rxmode.offloads,
> +				    dev_info.rx_offload_capa);
> +		return -EINVAL;
> +	}
> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> +	     local_conf.txmode.offloads) {
> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
> +				    "0x%" PRIx64 " doesn't match Tx offloads "
> +				    "capabilities 0x%" PRIx64 "\n",
> +				    port_id,
> +				    local_conf.txmode.offloads,
> +				    dev_info.tx_offload_capa);
> +		return -EINVAL;
> +	}
> +
>   	/* Check that device supports requested rss hash functions. */
>   	if ((dev_info.flow_type_rss_offloads |
>   	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
> @@ -1504,6 +1526,39 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>   						    &local_conf.offloads);
>   	}
>   
> +	/*
> +	 * If an offloading has already been enabled in
> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> +	 * so there is no need to enable it in this queue again.
> +	 * The local_conf.offloads input to underlying PMD only carries
> +	 * those offloadings which are only enabled on this queue and
> +	 * not enabled on all queues.
> +	 * The underlying PMD must be aware of this point.
> +	 */
> +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

In fact it has a problem with MULTI_SEG.
It is a new offload which was introduced to substitute NOMULTISEG TxQ flag.
If PMD reports the offload on device level, but application does not use
the new interface and request the offload on queue level using txq_flags
(absent NOMULTISEG flag). As the result below check fails.
Sounds like it requires dedicated code to handle it. Anything else?

> +
> +	/*
> +	 * New added offloadings for this queue are those not enabled in
> +	 * rte_eth_dev_configure( ) and they must be per-queue type.
> +	 * A pure per-port offloading can't be enabled on a queue while
> +	 * disabled on another queue. A pure per-port offloading can't
> +	 * be enabled for any queue as new added one if it hasn't been
> +	 * enabled in rte_eth_dev_configure( ).
> +	 */
> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> +	     local_conf.offloads) {
> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d, new "
> +				    "added offloads 0x" PRIx64 " must be "
> +				    "within pre-queue offload capabilities 0x"
> +				    PRIx64 " in %s\n",
> +				    port_id,
> +				    rx_queue_id,
> +				    local_conf.offloads,
> +				    dev_info.rx_queue_offload_capa,
> +				    __func__);
> +		return -EINVAL;
> +	}
> +
>   	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
>   					      socket_id, &local_conf, mp);
>   	if (!ret) {

[...]

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 17:51             ` Andrew Rybchenko
@ 2018-05-09  2:10               ` Dai, Wei
  2018-05-09 14:11               ` Ferruh Yigit
  1 sibling, 0 replies; 60+ messages in thread
From: Dai, Wei @ 2018-05-09  2:10 UTC (permalink / raw)
  To: Andrew Rybchenko, Yigit, Ferruh, thomas, shahafs, Zhang, Qi Z; +Cc: dev

> -----Original Message-----
> From: Andrew Rybchenko [mailto:arybchenko@solarflare.com]
> Sent: Wednesday, May 9, 2018 1:52 AM
> To: Dai, Wei <wei.dai@intel.com>; Yigit, Ferruh <ferruh.yigit@intel.com>;
> thomas@monjalon.net; shahafs@mellanox.com; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
> 
> On 05/08/2018 01:10 PM, Wei Dai wrote:
> > This patch check if a input requested offloading is valid or not.
> > Any reuqested offloading must be supported in the device capabilities.
> > Any offloading is disabled by default if it is not set in the
> > parameter dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> > [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> > If any offloading is enabled in rte_eth_dev_configure( ) by
> > application, it is enabled on all queues no matter whether it is
> > per-queue or per-port type and no matter whether it is set or cleared
> > in [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> > If a per-queue offloading hasn't be enabled in rte_eth_dev_configure(
> > ), it can be enabled or disabled for individual queue in
> > ret_eth_[rt]x_queue_setup( ).
> > A new added offloading is the one which hasn't been enabled in
> > rte_eth_dev_configure( ) and is reuqested to be enabled in
> > rte_eth_[rt]x_queue_setup( ), it must be per-queue type, otherwise
> > return error.
> > The underlying PMD must be aware that the requested offloadings to PMD
> > specific queue_setup( ) function only carries those new added
> > offloadings of per-queue type.
> >
> > This patch can make above such checking in a common way in rte_ethdev
> > layer to avoid same checking in underlying PMD.
> >
> > This patch assumes that all PMDs in 18.05-rc2 have already converted
> > to offload API defined in 17.11 . It also assumes that all PMDs can
> > return correct offloading capabilities in rte_eth_dev_infos_get( ).
> >
> > In the beginning of [rt]x_queue_setup( ) of underlying PMD, add
> > offloads = [rt]xconf->offloads |
> > dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> > defined in 17.11 to avoid upper application broken due to offload API
> > change.
> > PMD can use the info that input [rt]xconf->offloads only carry the new
> > added per-queue offloads to do some optimization or some code change
> > on base of this patch.
> >
> > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> 
> [...]
> 
> > diff --git a/lib/librte_ethdev/rte_ethdev.c
> > b/lib/librte_ethdev/rte_ethdev.c index e560524..523a07b 100644
> > --- a/lib/librte_ethdev/rte_ethdev.c
> > +++ b/lib/librte_ethdev/rte_ethdev.c
> > @@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id,
> uint16_t nb_rx_q, uint16_t nb_tx_q,
> >   							ETHER_MAX_LEN;
> >   	}
> >
> > +	/* Any requested offloading must be within its device capabilities */
> > +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> > +	     local_conf.rxmode.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx
> offloads "
> > +				    "0x%" PRIx64 " doesn't match Rx offloads "
> > +				    "capabilities 0x%" PRIx64 "\n",
> > +				    port_id,
> > +				    local_conf.rxmode.offloads,
> > +				    dev_info.rx_offload_capa);
> > +		return -EINVAL;
> > +	}
> > +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> > +	     local_conf.txmode.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx
> offloads "
> > +				    "0x%" PRIx64 " doesn't match Tx offloads "
> > +				    "capabilities 0x%" PRIx64 "\n",
> > +				    port_id,
> > +				    local_conf.txmode.offloads,
> > +				    dev_info.tx_offload_capa);
> > +		return -EINVAL;
> > +	}
> > +
> >   	/* Check that device supports requested rss hash functions. */
> >   	if ((dev_info.flow_type_rss_offloads |
> >   	     dev_conf->rx_adv_conf.rss_conf.rss_hf) != @@ -1504,6
> +1526,39
> > @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
> >   						    &local_conf.offloads);
> >   	}
> >
> > +	/*
> > +	 * If an offloading has already been enabled in
> > +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> > +	 * so there is no need to enable it in this queue again.
> > +	 * The local_conf.offloads input to underlying PMD only carries
> > +	 * those offloadings which are only enabled on this queue and
> > +	 * not enabled on all queues.
> > +	 * The underlying PMD must be aware of this point.
> > +	 */
> > +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
> 
> In fact it has a problem with MULTI_SEG.
> It is a new offload which was introduced to substitute NOMULTISEG TxQ
> flag.
> If PMD reports the offload on device level, but application does not use the
> new interface and request the offload on queue level using txq_flags (absent
> NOMULTISEG flag). As the result below check fails.
> Sounds like it requires dedicated code to handle it. Anything else?
> 
Indeed, the Tx offload API before 17.11 looks per-queue level, as offloading
is enabled by the argument to rte_eth_tx_queue_setup( ) for each Tx queue.
Each Tx queue can have a different input offloading argument tx_conf->txq_flags,
but some underlying PMDs have the limitation that different txq_flags are not permitted.
So the Tx offload API before 17.11 can't reflect the device capabilities.
The new offload API can tell the application the correct way to use Tx offloading
and reduce the confusion for the application.
Application developers should be aware of the version of the DPDK library.
They can revise their code for the check failure according to the error message.

> > +
> > +	/*
> > +	 * New added offloadings for this queue are those not enabled in
> > +	 * rte_eth_dev_configure( ) and they must be per-queue type.
> > +	 * A pure per-port offloading can't be enabled on a queue while
> > +	 * disabled on another queue. A pure per-port offloading can't
> > +	 * be enabled for any queue as new added one if it hasn't been
> > +	 * enabled in rte_eth_dev_configure( ).
> > +	 */
> > +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> > +	     local_conf.offloads) {
> > +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d,
> new "
> > +				    "added offloads 0x" PRIx64 " must be "
> > +				    "within pre-queue offload capabilities 0x"
> > +				    PRIx64 " in %s\n",
> > +				    port_id,
> > +				    rx_queue_id,
> > +				    local_conf.offloads,
> > +				    dev_info.rx_queue_offload_capa,
> > +				    __func__);
> > +		return -EINVAL;
> > +	}
> > +
> >   	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id,
> nb_rx_desc,
> >   					      socket_id, &local_conf, mp);
> >   	if (!ret) {
> 
> [...]

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 12:12             ` Ferruh Yigit
@ 2018-05-09 12:45               ` Dai, Wei
  0 siblings, 0 replies; 60+ messages in thread
From: Dai, Wei @ 2018-05-09 12:45 UTC (permalink / raw)
  To: Yigit, Ferruh, thomas, Doherty, Declan, linville, mw, mk,
	gtzalik, evgenys, ravi1.kumar, shepard.siegel, ed.czeck,
	john.miller, ajit.khaparde, somnath.kotur, jerin.jacob,
	maciej.czekaj, shijith.thotton, ssrinivasan, santosh.shukla,
	rahul.lakkireddy, ohndale, hyonkim, Lu, Wenzhuo, Ananyev,
	Konstantin, Xing, Beilei, Zhang, Qi Z, Wang, Xiao W, Wu,
	Jingjing, tdu, dima, nsamsono, jianbo.liu, adrien.mazarguil,
	nelio.laranjeiro, yskoh, matan, vido, alejandro.lucero,
	emant.agrawal, shreyansh.jain, hemant.agrawal, harish.patil,
	rasesh.mody, asesh.mody, shahed.shaikh, arybchenko, yongwang,
	maxime.coquelin, mtetsuyah, Bie, Tiwei, Legacy,
	Allain (Wind River), Peters, Matt (Wind River),
	pascal.mazon, Richardson, Bruce, gaetan.rivet, Singh, Jasvinder,
	Dumitrescu, Cristian
  Cc: dev

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Tuesday, May 8, 2018 8:13 PM
> To: Dai, Wei <wei.dai@intel.com>; thomas@monjalon.net; Doherty, Declan
> <declan.doherty@intel.com>; linville@tuxdriver.com; mw@semihalf.com;
> mk@semihalf.com; gtzalik@amazon.com; evgenys@amazon.com;
> ravi1.kumar@amd.com; shepard.siegel@atomicrules.com;
> ed.czeck@atomicrules.com; john.miller@atomicrules.com;
> ajit.khaparde@broadcom.com; somnath.kotur@broadcom.com;
> jerin.jacob@caviumnetworks.com; maciej.czekaj@caviumnetworks.com;
> shijith.thotton@cavium.com; ssrinivasan@cavium.com;
> santosh.shukla@caviumnetworks.com; rahul.lakkireddy@chelsio.com;
> ohndale@cisco.com; hyonkim@cisco.com; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Xing, Beilei <beilei.xing@intel.com>;
> Zhang, Qi Z <qi.z.zhang@intel.com>; Wang, Xiao W
> <xiao.w.wang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> tdu@semihalf.com; dima@marvell.com; nsamsono@marvell.com;
> jianbo.liu@arm.com; adrien.mazarguil@6wind.com;
> nelio.laranjeiro@6wind.com; yskoh@mellanox.com; matan@mellanox.com;
> vido@cesnet.cz; alejandro.lucero@netronome.com;
> emant.agrawal@nxp.com; shreyansh.jain@nxp.com;
> hemant.agrawal@nxp.com; harish.patil@cavium.com;
> rasesh.mody@cavium.com; asesh.mody@cavium.com;
> shahed.shaikh@cavium.com; arybchenko@solarflare.com;
> yongwang@vmware.com; maxime.coquelin@redhat.com;
> mtetsuyah@gmail.com; Bie, Tiwei <tiwei.bie@intel.com>; Legacy, Allain
> (Wind River) <allain.legacy@windriver.com>; Peters, Matt (Wind River)
> <matt.peters@windriver.com>; pascal.mazon@6wind.com; Richardson,
> Bruce <bruce.richardson@intel.com>; gaetan.rivet@6wind.com; Singh,
> Jasvinder <jasvinder.singh@intel.com>; Dumitrescu, Cristian
> <cristian.dumitrescu@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH v8] ethdev: check Rx/Tx offloads
> 
> On 5/8/2018 11:05 AM, Wei Dai wrote:
> > This patch check if a input requested offloading is valid or not.
> > Any reuqested offloading must be supported in the device capabilities.
> > Any offloading is disabled by default if it is not set in the
> > parameter dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> > [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> > If any offloading is enabled in rte_eth_dev_configure( ) by
> > application, it is enabled on all queues no matter whether it is
> > per-queue or per-port type and no matter whether it is set or cleared
> > in [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> > If a per-queue offloading hasn't be enabled in rte_eth_dev_configure(
> > ), it can be enabled or disabled for individual queue in
> > ret_eth_[rt]x_queue_setup( ).
> > A new added offloading is the one which hasn't been enabled in
> > rte_eth_dev_configure( ) and is reuqested to be enabled in
> > rte_eth_[rt]x_queue_setup( ), it must be per-queue type, otherwise
> > return error.
> > The underlying PMD must be aware that the requested offloadings to PMD
> > specific queue_setup( ) function only carries those new added
> > offloadings of per-queue type.
> >
> > This patch can make above such checking in a common way in rte_ethdev
> > layer to avoid same checking in underlying PMD.
> >
> > This patch assumes that all PMDs in 18.05-rc2 have already converted
> > to offload API defined in 17.11 . It also assumes that all PMDs can
> > return correct offloading capabilities in rte_eth_dev_infos_get( ).
> >
> > In the beginning of [rt]x_queue_setup( ) of underlying PMD, add
> > offloads = [rt]xconf->offloads |
> > dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> > defined in 17.11 to avoid upper application broken due to offload API
> > change.
> > PMD can use the info that input [rt]xconf->offloads only carry the new
> > added per-queue offloads to do some optimization or some code change
> > on base of this patch.
> >
> > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> >
> > ---
> > v8:
> > Revise PMD codes to comply with offload API in v7 update document
> >
> > v7:
> > Give the maximum freedom for upper application, only minimal checking
> > is performed in ethdev layer.
> > Only requested specific pure per-queue offloadings are input to
> > underlying PMD.
> >
> > v6:
> > No need enable an offload in queue_setup( ) if it has already been
> > enabled in dev_configure( )
> >
> > v5:
> > keep offload settings sent to PMD same as those from application
> >
> > v4:
> > fix a wrong description in git log message.
> >
> > v3:
> > rework according to dicision of offloading API in community
> >
> > v2:
> > add offloads checking in rte_eth_dev_configure( ).
> > check if a requested offloading is supported.
> > ---
> >  doc/guides/prog_guide/poll_mode_drv.rst |  26 +++--
> >  doc/guides/rel_notes/release_18_05.rst  |   8 ++
> >  drivers/net/avf/avf_rxtx.c              |   5 +-
> >  drivers/net/bnxt/bnxt_ethdev.c          |  17 ----
> >  drivers/net/cxgbe/cxgbe_ethdev.c        |  50 +---------
> >  drivers/net/dpaa/dpaa_ethdev.c          |  16 ----
> >  drivers/net/dpaa2/dpaa2_ethdev.c        |  16 ----
> >  drivers/net/e1000/em_ethdev.c           |  19 ----
> >  drivers/net/e1000/em_rxtx.c             |  64 ++-----------
> >  drivers/net/e1000/igb_rxtx.c            |  64 ++-----------
> >  drivers/net/ena/ena_ethdev.c            |  65 +------------
> >  drivers/net/failsafe/failsafe_ops.c     |  81 ----------------
> >  drivers/net/fm10k/fm10k_ethdev.c        |  82 ++--------------
> >  drivers/net/i40e/i40e_rxtx.c            |  58 ++----------
> >  drivers/net/ixgbe/ixgbe_ethdev.c        |  38 --------
> >  drivers/net/ixgbe/ixgbe_rxtx.c          |  66 ++-----------
> >  drivers/net/mlx4/mlx4_rxq.c             |  43 ++-------
> >  drivers/net/mlx4/mlx4_txq.c             |  42 ++------
> >  drivers/net/mlx5/mlx5_ethdev.c          |  22 -----
> >  drivers/net/mlx5/mlx5_rxq.c             |  50 ++--------
> >  drivers/net/mlx5/mlx5_txq.c             |  44 +--------
> >  drivers/net/mvpp2/mrvl_ethdev.c         |  97 +------------------
> >  drivers/net/nfp/nfp_net.c               | 163
> --------------------------------
> >  drivers/net/octeontx/octeontx_ethdev.c  |  72 +-------------
> >  drivers/net/sfc/sfc_ethdev.c            |   9 +-
> >  drivers/net/sfc/sfc_rx.c                |  42 ++------
> >  drivers/net/sfc/sfc_rx.h                |   3 +-
> >  drivers/net/sfc/sfc_tx.c                |  42 ++------
> >  drivers/net/sfc/sfc_tx.h                |   3 +-
> >  drivers/net/tap/rte_eth_tap.c           |  88 ++---------------
> >  drivers/net/thunderx/nicvf_ethdev.c     |  70 ++------------
> >  drivers/net/virtio/virtio_rxtx.c        |   9 +-
> >  drivers/net/vmxnet3/vmxnet3_ethdev.c    |  16 ----
> >  drivers/net/vmxnet3/vmxnet3_rxtx.c      |   8 +-
> >  lib/librte_ethdev/rte_ethdev.c          |  88 +++++++++++++++++
> >  35 files changed, 240 insertions(+), 1346 deletions(-)
> 
> Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
> 
> 
> Hi Wei,
> 
> Thanks for this patch. Lets wait one more day for PMD owners to test the
> patch, if there is no objection patch targets rc3 which is a few days away.

This patch is created on base of 18.05-rc2.
So it may fail to be applied to some -next repositories.


^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-08 17:51             ` Andrew Rybchenko
  2018-05-09  2:10               ` Dai, Wei
@ 2018-05-09 14:11               ` Ferruh Yigit
  2018-05-09 22:40                 ` Ferruh Yigit
  1 sibling, 1 reply; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-09 14:11 UTC (permalink / raw)
  To: Andrew Rybchenko, Wei Dai, thomas, shahafs, qi.z.zhang; +Cc: dev

On 5/8/2018 6:51 PM, Andrew Rybchenko wrote:
> On 05/08/2018 01:10 PM, Wei Dai wrote:
>> This patch check if a input requested offloading is valid or not.
>> Any reuqested offloading must be supported in the device capabilities.
>> Any offloading is disabled by default if it is not set in the parameter
>> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
>> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
>> If any offloading is enabled in rte_eth_dev_configure( ) by application,
>> it is enabled on all queues no matter whether it is per-queue or
>> per-port type and no matter whether it is set or cleared in
>> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
>> If a per-queue offloading hasn't be enabled in rte_eth_dev_configure( ),
>> it can be enabled or disabled for individual queue in
>> ret_eth_[rt]x_queue_setup( ).
>> A new added offloading is the one which hasn't been enabled in
>> rte_eth_dev_configure( ) and is reuqested to be enabled in
>> rte_eth_[rt]x_queue_setup( ), it must be per-queue type,
>> otherwise return error.
>> The underlying PMD must be aware that the requested offloadings
>> to PMD specific queue_setup( ) function only carries those
>> new added offloadings of per-queue type.
>>
>> This patch can make above such checking in a common way in rte_ethdev
>> layer to avoid same checking in underlying PMD.
>>
>> This patch assumes that all PMDs in 18.05-rc2 have already
>> converted to offload API defined in 17.11 . It also assumes
>> that all PMDs can return correct offloading capabilities
>> in rte_eth_dev_infos_get( ).
>>
>> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
>> add offloads = [rt]xconf->offloads |
>> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
>> defined in 17.11 to avoid upper application broken due to offload
>> API change.
>> PMD can use the info that input [rt]xconf->offloads only carry
>> the new added per-queue offloads to do some optimization or some
>> code change on base of this patch.
>>
>> Signed-off-by: Wei Dai <wei.dai@intel.com>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> 
> [...]
> 
>> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
>> index e560524..523a07b 100644
>> --- a/lib/librte_ethdev/rte_ethdev.c
>> +++ b/lib/librte_ethdev/rte_ethdev.c
>> @@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>>   							ETHER_MAX_LEN;
>>   	}
>>   
>> +	/* Any requested offloading must be within its device capabilities */
>> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
>> +	     local_conf.rxmode.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
>> +				    "0x%" PRIx64 " doesn't match Rx offloads "
>> +				    "capabilities 0x%" PRIx64 "\n",
>> +				    port_id,
>> +				    local_conf.rxmode.offloads,
>> +				    dev_info.rx_offload_capa);
>> +		return -EINVAL;
>> +	}
>> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
>> +	     local_conf.txmode.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
>> +				    "0x%" PRIx64 " doesn't match Tx offloads "
>> +				    "capabilities 0x%" PRIx64 "\n",
>> +				    port_id,
>> +				    local_conf.txmode.offloads,
>> +				    dev_info.tx_offload_capa);
>> +		return -EINVAL;
>> +	}
>> +
>>   	/* Check that device supports requested rss hash functions. */
>>   	if ((dev_info.flow_type_rss_offloads |
>>   	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
>> @@ -1504,6 +1526,39 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>>   						    &local_conf.offloads);
>>   	}
>>   
>> +	/*
>> +	 * If an offloading has already been enabled in
>> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
>> +	 * so there is no need to enable it in this queue again.
>> +	 * The local_conf.offloads input to underlying PMD only carries
>> +	 * those offloadings which are only enabled on this queue and
>> +	 * not enabled on all queues.
>> +	 * The underlying PMD must be aware of this point.
>> +	 */
>> +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
> 
> In fact it has a problem with MULTI_SEG.
> It is a new offload which was introduced to substitute NOMULTISEG TxQ flag.
> If PMD reports the offload on device level, but application does not use
> the new interface and request the offload on queue level using txq_flags
> (absent NOMULTISEG flag). As the result below check fails.
> Sounds like it requires dedicated code to handle it. Anything else?

Old applications don't set Tx offloads on configure() at all. But they set them
in tx_queue_setup().
For the PMDs that support only port offloads old applications will fail in
tx_queue_setup(), if I don't miss anything.

I suggest keeping the log but remove the error return until all applications
switch to new offloading API which should be on next release. What do you think?


btw, the log macro "RTE_PMD_DEBUG_TRACE" uses the ERR log type but is controlled by
the CONFIG_RTE_LIBRTE_ETHDEV_DEBUG config option, which is disabled by default. So by
default we get only the error return without the error log. I suggest switching to
the new dynamic logging ethdev_log()


> 
>> +
>> +	/*
>> +	 * New added offloadings for this queue are those not enabled in
>> +	 * rte_eth_dev_configure( ) and they must be per-queue type.
>> +	 * A pure per-port offloading can't be enabled on a queue while
>> +	 * disabled on another queue. A pure per-port offloading can't
>> +	 * be enabled for any queue as new added one if it hasn't been
>> +	 * enabled in rte_eth_dev_configure( ).
>> +	 */
>> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
>> +	     local_conf.offloads) {
>> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d, new "
>> +				    "added offloads 0x" PRIx64 " must be "
>> +				    "within pre-queue offload capabilities 0x"
>> +				    PRIx64 " in %s\n",
>> +				    port_id,
>> +				    rx_queue_id,
>> +				    local_conf.offloads,
>> +				    dev_info.rx_queue_offload_capa,
>> +				    __func__);
>> +		return -EINVAL;
>> +	}
>> +
>>   	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
>>   					      socket_id, &local_conf, mp);
>>   	if (!ret) {
> 
> [...]
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads
  2018-05-09 14:11               ` Ferruh Yigit
@ 2018-05-09 22:40                 ` Ferruh Yigit
  0 siblings, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-09 22:40 UTC (permalink / raw)
  To: Andrew Rybchenko, Wei Dai, thomas, shahafs, qi.z.zhang; +Cc: dev

On 5/9/2018 3:11 PM, Ferruh Yigit wrote:
> On 5/8/2018 6:51 PM, Andrew Rybchenko wrote:
>> On 05/08/2018 01:10 PM, Wei Dai wrote:
>>> This patch check if a input requested offloading is valid or not.
>>> Any reuqested offloading must be supported in the device capabilities.
>>> Any offloading is disabled by default if it is not set in the parameter
>>> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
>>> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
>>> If any offloading is enabled in rte_eth_dev_configure( ) by application,
>>> it is enabled on all queues no matter whether it is per-queue or
>>> per-port type and no matter whether it is set or cleared in
>>> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
>>> If a per-queue offloading hasn't be enabled in rte_eth_dev_configure( ),
>>> it can be enabled or disabled for individual queue in
>>> ret_eth_[rt]x_queue_setup( ).
>>> A new added offloading is the one which hasn't been enabled in
>>> rte_eth_dev_configure( ) and is reuqested to be enabled in
>>> rte_eth_[rt]x_queue_setup( ), it must be per-queue type,
>>> otherwise return error.
>>> The underlying PMD must be aware that the requested offloadings
>>> to PMD specific queue_setup( ) function only carries those
>>> new added offloadings of per-queue type.
>>>
>>> This patch can make above such checking in a common way in rte_ethdev
>>> layer to avoid same checking in underlying PMD.
>>>
>>> This patch assumes that all PMDs in 18.05-rc2 have already
>>> converted to offload API defined in 17.11 . It also assumes
>>> that all PMDs can return correct offloading capabilities
>>> in rte_eth_dev_infos_get( ).
>>>
>>> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
>>> add offloads = [rt]xconf->offloads |
>>> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
>>> defined in 17.11 to avoid upper application broken due to offload
>>> API change.
>>> PMD can use the info that input [rt]xconf->offloads only carry
>>> the new added per-queue offloads to do some optimization or some
>>> code change on base of this patch.
>>>
>>> Signed-off-by: Wei Dai <wei.dai@intel.com>
>>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
>>
>> [...]
>>
>>> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
>>> index e560524..523a07b 100644
>>> --- a/lib/librte_ethdev/rte_ethdev.c
>>> +++ b/lib/librte_ethdev/rte_ethdev.c
>>> @@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>>>   							ETHER_MAX_LEN;
>>>   	}
>>>   
>>> +	/* Any requested offloading must be within its device capabilities */
>>> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
>>> +	     local_conf.rxmode.offloads) {
>>> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Rx offloads "
>>> +				    "0x%" PRIx64 " doesn't match Rx offloads "
>>> +				    "capabilities 0x%" PRIx64 "\n",
>>> +				    port_id,
>>> +				    local_conf.rxmode.offloads,
>>> +				    dev_info.rx_offload_capa);
>>> +		return -EINVAL;
>>> +	}
>>> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
>>> +	     local_conf.txmode.offloads) {
>>> +		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d requested Tx offloads "
>>> +				    "0x%" PRIx64 " doesn't match Tx offloads "
>>> +				    "capabilities 0x%" PRIx64 "\n",
>>> +				    port_id,
>>> +				    local_conf.txmode.offloads,
>>> +				    dev_info.tx_offload_capa);
>>> +		return -EINVAL;
>>> +	}
>>> +
>>>   	/* Check that device supports requested rss hash functions. */
>>>   	if ((dev_info.flow_type_rss_offloads |
>>>   	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
>>> @@ -1504,6 +1526,39 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>>>   						    &local_conf.offloads);
>>>   	}
>>>   
>>> +	/*
>>> +	 * If an offloading has already been enabled in
>>> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
>>> +	 * so there is no need to enable it in this queue again.
>>> +	 * The local_conf.offloads input to underlying PMD only carries
>>> +	 * those offloadings which are only enabled on this queue and
>>> +	 * not enabled on all queues.
>>> +	 * The underlying PMD must be aware of this point.
>>> +	 */
>>> +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
>>
>> In fact it has a problem with MULTI_SEG.
>> It is a new offload which was introduced to substitute NOMULTISEG TxQ flag.
>> If PMD reports the offload on device level, but application does not use
>> the new interface and request the offload on queue level using txq_flags
>> (absent NOMULTISEG flag). As the result below check fails.
>> Sounds like it requires dedicated code to handle it. Anything else?
> 
> Old applications don't set Tx offloads on configure() at all. But they set them
> in tx_queue_setup().
> For the PMDs that support only port offloads old applications will fail in
> tx_queue_setup(), if I don't miss anything.
> 
> I suggest keeping the log but remove the error return until all applications
> switch to new offloading API which should be on next release. What do you think?

Also virtual devices will give an error with this because of current testpmd
implementation [1].

We may hit similar unexpected issues, as Shahaf voiced same concern.

What do you think removing error return from all three locations,
rte_eth_dev_configure(), rte_eth_rx_queue_setup(), rte_eth_tx_queue_setup() and
just keep error log, for this release? Next release we will already update the
application interface, can add error returns next release.

[1]
https://dpdk.org/dev/patchwork/patch/39643/

> 
> 
> btw, the log macro "RTE_PMD_DEBUG_TRACE" is in ERR log type but it controlled by
> CONFIG_RTE_LIBRTE_ETHDEV_DEBUG config option which is disabled by default. So by
> default we don't get the error log but the error only. I suggest switching to
> new dynamic logging ethdev_log()
> 
> 
>>
>>> +
>>> +	/*
>>> +	 * New added offloadings for this queue are those not enabled in
>>> +	 * rte_eth_dev_configure( ) and they must be per-queue type.
>>> +	 * A pure per-port offloading can't be enabled on a queue while
>>> +	 * disabled on another queue. A pure per-port offloading can't
>>> +	 * be enabled for any queue as new added one if it hasn't been
>>> +	 * enabled in rte_eth_dev_configure( ).
>>> +	 */
>>> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
>>> +	     local_conf.offloads) {
>>> +		RTE_PMD_DEBUG_TRACE("Ethdev port_id=%d rx_queue_id=%d, new "
>>> +				    "added offloads 0x" PRIx64 " must be "
>>> +				    "within pre-queue offload capabilities 0x"
>>> +				    PRIx64 " in %s\n",
>>> +				    port_id,
>>> +				    rx_queue_id,
>>> +				    local_conf.offloads,
>>> +				    dev_info.rx_queue_offload_capa,
>>> +				    __func__);
>>> +		return -EINVAL;
>>> +	}
>>> +
>>>   	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
>>>   					      socket_id, &local_conf, mp);
>>>   	if (!ret) {
>>
>> [...]
>>
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v9] ethdev: new Rx/Tx offloads API
  2018-05-08 10:05           ` [dpdk-dev] [PATCH v8] " Wei Dai
                               ` (2 preceding siblings ...)
  2018-05-08 12:12             ` Ferruh Yigit
@ 2018-05-10  0:49             ` Wei Dai
  2018-05-10  0:56               ` [dpdk-dev] [PATCH v10] " Wei Dai
  3 siblings, 1 reply; 60+ messages in thread
From: Wei Dai @ 2018-05-10  0:49 UTC (permalink / raw)
  To: ferruh.yigit, thomas; +Cc: dev, Wei Dai, Qi Zhang

This patch checks if an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If any offloading is enabled in rte_eth_dev_configure( ) by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If a per-queue offloading hasn't been enabled in rte_eth_dev_configure( ),
it can be enabled or disabled for an individual queue in
rte_eth_[rt]x_queue_setup( ).
A newly added offloading is one which hasn't been enabled in
rte_eth_dev_configure( ) and is requested to be enabled in
rte_eth_[rt]x_queue_setup( ); it must be per-queue type,
otherwise trigger an error log.
The underlying PMD must be aware that the requested offloadings
to the PMD-specific queue_setup( ) function only carry those
newly added offloadings of per-queue type.

This patch can make above such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

This patch assumes that all PMDs in 18.05-rc2 have already been
converted to the offload API defined in 17.11. It also assumes
that all PMDs can return correct offloading capabilities
in rte_eth_dev_infos_get( ).

In the beginning of [rt]x_queue_setup( ) of the underlying PMD,
add offloads = [rt]xconf->offloads |
dev->data->dev_conf.[rt]xmode.offloads; to keep the same behavior as the
offload API defined in 17.11, to avoid breaking upper applications due to
the offload API change.
PMD can use the info that input [rt]xconf->offloads only carry
the new added per-queue offloads to do some optimization or some
code change on base of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

---
v9:
replace RTE_PMD_DEBUG_TRACE with ethdev_log(ERR, in ethdev
to avoid failure of application which hasn't been completely
converted to new offload API.

v8:
Revise PMD codes to comply with offload API in v7
update document

v7:
Give the maximum freedom for upper application,
only minimal checking is performed in ethdev layer.
Only requested specific pure per-queue offloadings are input
to underlying PMD.

v6:
No need enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to the decision on the offloading API in the community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 doc/guides/prog_guide/poll_mode_drv.rst |  26 +++--
 doc/guides/rel_notes/release_18_05.rst  |   8 ++
 drivers/net/avf/avf_rxtx.c              |   5 +-
 drivers/net/bnxt/bnxt_ethdev.c          |  17 ----
 drivers/net/cxgbe/cxgbe_ethdev.c        |  50 +---------
 drivers/net/dpaa/dpaa_ethdev.c          |  16 ----
 drivers/net/dpaa2/dpaa2_ethdev.c        |  16 ----
 drivers/net/e1000/em_ethdev.c           |  19 ----
 drivers/net/e1000/em_rxtx.c             |  64 ++-----------
 drivers/net/e1000/igb_rxtx.c            |  64 ++-----------
 drivers/net/ena/ena_ethdev.c            |  65 +------------
 drivers/net/failsafe/failsafe_ops.c     |  81 ----------------
 drivers/net/fm10k/fm10k_ethdev.c        |  82 ++--------------
 drivers/net/i40e/i40e_rxtx.c            |  58 ++----------
 drivers/net/ixgbe/ixgbe_ethdev.c        |  38 --------
 drivers/net/ixgbe/ixgbe_rxtx.c          |  66 ++-----------
 drivers/net/mlx4/mlx4_rxq.c             |  43 ++-------
 drivers/net/mlx4/mlx4_txq.c             |  42 ++------
 drivers/net/mlx5/mlx5_ethdev.c          |  22 -----
 drivers/net/mlx5/mlx5_rxq.c             |  50 ++--------
 drivers/net/mlx5/mlx5_txq.c             |  44 +--------
 drivers/net/mvpp2/mrvl_ethdev.c         |  97 +------------------
 drivers/net/nfp/nfp_net.c               | 163 --------------------------------
 drivers/net/octeontx/octeontx_ethdev.c  |  72 +-------------
 drivers/net/sfc/sfc_ethdev.c            |   9 +-
 drivers/net/sfc/sfc_rx.c                |  42 ++------
 drivers/net/sfc/sfc_rx.h                |   3 +-
 drivers/net/sfc/sfc_tx.c                |  42 ++------
 drivers/net/sfc/sfc_tx.h                |   3 +-
 drivers/net/tap/rte_eth_tap.c           |  88 ++---------------
 drivers/net/thunderx/nicvf_ethdev.c     |  70 ++------------
 drivers/net/virtio/virtio_rxtx.c        |   9 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c    |  16 ----
 drivers/net/vmxnet3/vmxnet3_rxtx.c      |   8 +-
 lib/librte_ethdev/rte_ethdev.c          |  86 +++++++++++++++++
 35 files changed, 238 insertions(+), 1346 deletions(-)

diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 09a93ba..56483fb 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -297,16 +297,30 @@ Per-Port and Per-Queue Offloads
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
+A per-queue offloading can be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offloading must be enabled or disabled on all queues at the same time.
+A per-port offloading can be enabled or disabled on all queues at the same time.
+It is certain that both per-queue and pure per-port offloading are per-port type.
 The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
+The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
+The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
-Per-port offload configuration is set using ``rte_eth_dev_configure``.
-Per-queue offload configuration is set using ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup``.
-To enable per-port offload, the offload should be set on both device configuration and queue setup.
-In case of a mixed configuration the queue setup shall return with an error.
-To enable per-queue offload, the offload can be set only on the queue setup.
-Offloads which are not enabled are disabled by default.
+Any requested offloading by application must be within the device capabilities.
+Any offloading is disabled by default if it is not set in the parameter
+dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure( )`` and
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
+If any offloading is enabled in ``rte_eth_dev_configure( )`` by application,
+it is enabled on all queues no matter whether it is per-queue or
+per-port type and no matter whether it is set or cleared in
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
+If a per-queue offloading hasn't been enabled in ``rte_eth_dev_configure( )``,
+it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup( )`` for individual queue.
+A new added offloads in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )`` input by application
+is the one which hasn't been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled
+in ``rte_eth_[rt]x_queue_setup( )``, it must be per-queue type, otherwise return error.
 
 For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in ``rte_eth_txconf`` struct.
 In such cases it is not required to set other flags in ``txq_flags``.
diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
index 0ae61e8..637e684 100644
--- a/doc/guides/rel_notes/release_18_05.rst
+++ b/doc/guides/rel_notes/release_18_05.rst
@@ -303,6 +303,14 @@ API Changes
   * ``rte_flow_create()`` API count action now requires the ``struct rte_flow_action_count``.
   * ``rte_flow_query()`` API parameter changed from action type to action structure.
 
+* **ethdev: changes to offload API**
+
+   A pure per-port offloading isn't requested to be repeated in [rt]x_conf->offloads to
+   ``rte_eth_[rt]x_queue_setup( )``. Now any offloading enabled in ``rte_eth_dev_configure( )``
+   can't be disabled by ``rte_eth_[rt]x_queue_setup( )``. Any new added offloading which has
+   not been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled in
+   ``rte_eth_[rt]x_queue_setup( )`` must be per-queue type, otherwise return error.
+
 
 ABI Changes
 -----------
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index 1824ed7..e03a136 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t i, base, bsf, tc_mapping;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
 	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
 	    nb_desc > AVF_MAX_RING_DESC ||
 	    nb_desc < AVF_MIN_RING_DESC) {
@@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->free_thresh = tx_free_thresh;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	/* Allocate software ring */
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 348129d..d00b99f 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -500,25 +500,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
-	uint64_t tx_offloads = eth_dev->data->dev_conf.txmode.offloads;
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (tx_offloads != (tx_offloads & BNXT_DEV_TX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Tx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			 tx_offloads, BNXT_DEV_TX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
-	if (rx_offloads != (rx_offloads & BNXT_DEV_RX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Rx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			    rx_offloads, BNXT_DEV_RX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
 
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 3df51b5..fadf684 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -366,31 +366,15 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
-	uint64_t unsupported_offloads, configured_offloads;
+	uint64_t configured_offloads;
 	int err;
 
 	CXGBE_FUNC_TRACE();
 	configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
-	configured_offloads = eth_dev->data->dev_conf.txmode.offloads;
-	unsupported_offloads = configured_offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
+		eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
@@ -440,7 +424,7 @@ int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_txconf *tx_conf)
+			     const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
@@ -448,15 +432,6 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
 	int err = 0;
 	unsigned int temp_nb_desc;
-	uint64_t unsupported_offloads;
-
-	unsupported_offloads = tx_conf->offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
 		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
@@ -553,7 +528,7 @@ int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_rxconf *rx_conf,
+			     const struct rte_eth_rxconf *rx_conf __rte_unused,
 			     struct rte_mempool *mp)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -565,21 +540,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	unsigned int temp_nb_desc;
 	struct rte_eth_dev_info dev_info;
 	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
-	uint64_t unsupported_offloads, configured_offloads;
-
-	configured_offloads = rx_conf->offloads;
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
 		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 6bf8c15..199afdd 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -176,14 +176,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -192,14 +184,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c304b82..de8d83a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -309,14 +309,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA2_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA2_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -325,14 +317,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA2_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA2_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 694a624..4e890ad 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -454,29 +454,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 
-	eth_em_infos_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 2b3c63e..a6b3e92 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1183,22 +1183,6 @@ em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return tx_queue_offload_capa;
 }
 
-static int
-em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1211,21 +1195,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t tsize;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			em_get_tx_port_offloads_capa(dev),
-			em_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -1330,7 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	return 0;
 }
 
@@ -1412,22 +1386,6 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1440,21 +1398,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	struct em_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	uint32_t rsize;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			em_get_rx_port_offloads_capa(dev),
-			em_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -1523,7 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a3776a0..128ed0b 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1475,22 +1475,6 @@ igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1502,19 +1486,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_tx_queue *txq;
 	struct e1000_hw     *hw;
 	uint32_t size;
+	uint64_t offloads;
 
-	if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			igb_get_tx_port_offloads_capa(dev),
-			igb_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1599,7 +1573,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	return 0;
 }
@@ -1690,22 +1664,6 @@ igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1718,19 +1676,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	unsigned int size;
+	uint64_t offloads;
 
-	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			igb_get_rx_port_offloads_capa(dev),
-			igb_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1756,7 +1704,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 41b5638..c595cc7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -238,10 +238,6 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -1005,12 +1001,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
-	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1065,7 +1055,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
@@ -1078,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      const struct rte_eth_rxconf *rx_conf,
+			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1114,11 +1104,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1422,22 +1407,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
-	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    tx_offloads, adapter->tx_supported_offloads);
-		return -ENOTSUP;
-	}
-
-	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    rx_offloads, adapter->rx_supported_offloads);
-		return -ENOTSUP;
-	}
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1459,8 +1428,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		break;
 	}
 
-	adapter->tx_selected_offloads = tx_offloads;
-	adapter->rx_selected_offloads = rx_offloads;
+	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
 	return 0;
 }
 
@@ -1489,32 +1458,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->tx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->rx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 6d44884..368d23f 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -90,22 +90,10 @@ static int
 fs_dev_configure(struct rte_eth_dev *dev)
 {
 	struct sub_device *sdev;
-	uint64_t supp_tx_offloads;
-	uint64_t tx_offloads;
 	uint8_t i;
 	int ret;
 
 	fs_lock(dev, 0);
-	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		ERROR("Some Tx offloads are not supported, "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-		      tx_offloads, supp_tx_offloads);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	FOREACH_SUBDEV(sdev, i, dev) {
 		int rmv_interrupt = 0;
 		int lsc_interrupt = 0;
@@ -297,25 +285,6 @@ fs_dev_close(struct rte_eth_dev *dev)
 	fs_unlock(dev, 0);
 }
 
-static bool
-fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.rxmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_rx_queue_release(void *queue)
 {
@@ -368,19 +337,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
 		fs_rx_queue_release(rxq);
 		dev->data->rx_queues[rx_queue_id] = NULL;
 	}
-	/* Verify application offloads are valid for our port and queue. */
-	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Rx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      rx_conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      PRIV(dev)->infos.rx_offload_capa |
-		      PRIV(dev)->infos.rx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	rxq = rte_zmalloc(NULL,
 			  sizeof(*rxq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -499,25 +455,6 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 	return rc;
 }
 
-static bool
-fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.txmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_tx_queue_release(void *queue)
 {
@@ -557,24 +494,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 		fs_tx_queue_release(txq);
 		dev->data->tx_queues[tx_queue_id] = NULL;
 	}
-	/*
-	 * Don't verify queue offloads for applications which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Tx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      tx_conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      PRIV(dev)->infos.tx_offload_capa |
-		      PRIV(dev)->infos.tx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	txq = rte_zmalloc("ethdev TX queue",
 			  sizeof(*txq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7dfeddf..7a59530 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -448,29 +448,13 @@ static int
 fm10k_dev_configure(struct rte_eth_dev *dev)
 {
 	int ret;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0)
+	if ((dev->data->dev_conf.rxmode.offloads &
+	     DEV_RX_OFFLOAD_CRC_STRIP) == 0)
 		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
 
-	fm10k_dev_infos_get(dev, &dev_info);
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* multipe queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
 	if (ret != 0) {
@@ -1827,22 +1811,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = fm10k_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
@@ -1852,20 +1820,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	struct fm10k_rx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_rx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			fm10k_get_rx_port_offloads_capa(dev),
-			fm10k_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/* make sure the mempool element size can account for alignment. */
 	if (!mempool_element_size_valid(mp)) {
@@ -1911,7 +1870,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->queue_id = queue_id;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	if (handle_rxconf(q, conf))
 		return -EINVAL;
 
@@ -2040,22 +1999,6 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = fm10k_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_txconf *conf)
@@ -2063,20 +2006,11 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_tx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_tx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			fm10k_get_tx_port_offloads_capa(dev),
-			fm10k_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* make sure a valid number of descriptors have been requested */
 	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
@@ -2115,7 +2049,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->port_id = dev->data->port_id;
 	q->queue_id = queue_id;
 	q->txq_flags = conf->txq_flags;
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	q->ops = &def_txq_ops;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 62985c3..05b4950 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1690,20 +1690,6 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 }
 
 static int
-i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
-	if ((requested & dev_info.rx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_first_queue(uint16_t idx, void **queues, int num)
 {
 	uint16_t i;
@@ -1792,18 +1778,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1857,7 +1834,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/* Allocate the maximun number of RX ring hardware descriptor. */
 	len = I40E_MAX_RING_DESC;
@@ -2075,20 +2052,6 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 static int
-i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
-	if ((requested & dev_info.tx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 				struct i40e_tx_queue *txq)
 {
@@ -2151,18 +2114,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t reg_idx, i, base, bsf, tc_mapping;
 	int q_offset;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			dev_info.tx_offload_capa);
-			return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2297,7 +2251,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->queue_id = queue_idx;
 	txq->reg_idx = reg_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 91179e9..320ab21 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2365,9 +2365,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2379,22 +2376,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	ixgbe_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -4965,29 +4946,10 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	ixgbevf_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2892436..7de6f00 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2448,22 +2448,6 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	return tx_offload_capa;
 }
 
-static int
-ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2475,25 +2459,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct ixgbe_tx_queue *txq;
 	struct ixgbe_hw     *hw;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			ixgbe_get_tx_queue_offloads(dev),
-			ixgbe_get_tx_port_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -2621,7 +2592,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -2915,22 +2886,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	return offloads;
 }
 
-static int
-ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2945,21 +2900,12 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			ixgbe_get_rx_port_offloads(dev),
-			ixgbe_get_rx_queue_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -2994,7 +2940,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 65f0994..35c44ff 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -693,26 +693,6 @@ mlx4_get_rx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = mlx4_get_rx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Rx queue.
  *
  * @param dev
@@ -754,20 +734,13 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	};
 	int ret;
 	uint32_t crc_present;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
-	(void)conf; /* Thresholds configuration (ignored). */
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      (mlx4_get_rx_port_offloads(priv) |
-		       mlx4_get_rx_queue_offloads(priv)));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_rx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -793,7 +766,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		     (void *)dev, idx, desc);
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		crc_present = 0;
 	} else if (priv->hw_fcs_strip) {
 		crc_present = 1;
@@ -825,9 +798,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -840,7 +813,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		uint32_t size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index fe6a8e0..2443333 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -180,26 +180,6 @@ mlx4_get_tx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = mlx4_get_tx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Tx queue.
  *
  * @param dev
@@ -246,23 +226,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		},
 	};
 	int ret;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      mlx4_get_tx_port_offloads(priv));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_tx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -313,11 +283,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
 					   DEV_TX_OFFLOAD_UDP_CKSUM |
 					   DEV_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads &
+			      (offloads &
 			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 746b94f..df369cd 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -330,30 +330,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	unsigned int reta_idx_n;
 	const uint8_t use_app_rss_key =
 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-	uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t supp_rx_offloads =
-		(mlx5_get_rx_port_offloads() |
-		 mlx5_get_rx_queue_offloads(dev));
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret = 0;
 
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Tx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, tx_offloads, supp_tx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
-	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Rx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, rx_offloads, supp_rx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (use_app_rss_key &&
 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
 	     rss_hash_default_key_len)) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 126412d..cea93cf 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -237,32 +237,6 @@ mlx5_get_rx_port_offloads(void)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
-	uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return 0;
-	if (((port_offloads ^ offloads) & port_supp_offloads))
-		return 0;
-	return 1;
-}
-
-/**
  *
  * @param dev
  *   Pointer to Ethernet device structure.
@@ -305,18 +279,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = EOVERFLOW;
 		return -rte_errno;
 	}
-	if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
-		DRV_LOG(ERR,
-			"port %u Rx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(mlx5_get_rx_port_offloads() |
-			 mlx5_get_rx_queue_offloads(dev)));
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (!mlx5_rxq_releasable(dev, idx)) {
 		DRV_LOG(ERR, "port %u unable to release queue index %u",
 			dev->data->port_id, idx);
@@ -980,6 +942,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 */
 	const uint16_t desc_n =
 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+	uint64_t offloads = conf->offloads |
+			   dev->data->dev_conf.rxmode.offloads;
 
 	tmpl = rte_calloc_socket("RXQ", 1,
 				 sizeof(*tmpl) +
@@ -997,7 +961,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		tmpl->rxq.sges_n = 0;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1044,12 +1008,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
-	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		tmpl->rxq.crc_present = 0;
 	} else if (config->hw_fcs_strip) {
 		tmpl->rxq.crc_present = 1;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4435874..fb7b4ad 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -127,31 +127,6 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
-
-	/* There are no Tx offloads which are per queue. */
-	if ((offloads & port_supp_offloads) != offloads)
-		return 0;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return 0;
-	return 1;
-}
-
-/**
  * DPDK callback to configure a TX queue.
  *
  * @param dev
@@ -177,22 +152,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		DRV_LOG(ERR,
-			"port %u Tx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			mlx5_get_tx_port_offloads(dev));
-		return -rte_errno;
-	}
 	if (desc <= MLX5_TX_COMP_THRESH) {
 		DRV_LOG(WARNING,
 			"port %u number of descriptors requested for Tx queue"
@@ -810,7 +769,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl->txq.offloads = conf->offloads;
+	tmpl->txq.offloads = conf->offloads |
+			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 05998bf..c9d85ca 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -318,26 +318,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.split_hdr_size) {
 		RTE_LOG(INFO, PMD, "Split headers not supported\n");
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
-		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
-		return -EINVAL;
-	}
-
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		RTE_LOG(INFO, PMD, "LRO not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 ETHER_HDR_LEN - ETHER_CRC_LEN;
@@ -1522,42 +1507,6 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
 }
 
 /**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = MRVL_RX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the receive queue.
  *
  * @param dev
@@ -1587,9 +1536,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	uint32_t min_size,
 		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	int ret, tc, inq;
+	uint64_t offloads;
 
-	if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
 		/*
@@ -1622,8 +1571,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled =
-		dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1686,42 +1634,6 @@ mrvl_rx_queue_release(void *rxq)
 }
 
 /**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = MRVL_TX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the transmit queue.
  *
  * @param dev
@@ -1746,9 +1658,6 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mrvl_priv *priv = dev->data->dev_private;
 	struct mrvl_txq *txq;
 
-	if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
-
 	if (dev->data->tx_queues[idx]) {
 		rte_free(dev->data->tx_queues[idx]);
 		dev->data->tx_queues[idx] = NULL;
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 048324e..d3b8ec0 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -412,148 +412,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX offloads */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
-		PMD_INIT_LOG(INFO, "rxmode does not support split header");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
-		PMD_INIT_LOG(INFO, "RXCSUM not supported");
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-		PMD_INIT_LOG(INFO, "VLAN filter not supported");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
-		PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		PMD_INIT_LOG(INFO, "LRO not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
-		PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
 	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
 		PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
 
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
-		PMD_INIT_LOG(INFO, "Scatter not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
-		PMD_INIT_LOG(INFO, "timestamp offfload not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "security offload not supported");
-		return -EINVAL;
-	}
-
-	/* checking TX offloads */
-	if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
-		PMD_INIT_LOG(INFO, "vlan insert offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
-		PMD_INIT_LOG(INFO, "TX checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) {
-		PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
-		PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
-		PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
-		PMD_INIT_LOG(INFO, "tunneling offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
-		PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
-		PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
-		PMD_INIT_LOG(INFO, "TX multisegs  not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
-		PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "TX security offload not supported");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -1600,8 +1461,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	const struct rte_memzone *tz;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_rxmode *rxmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1615,17 +1474,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	rxmode = &dev_conf->rxmode;
-
-	if (rx_conf->offloads != rxmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u rx offloads not as port offloads",
-				  queue_idx);
-		PMD_DRV_LOG(ERR, "\tport: %" PRIx64 "", rxmode->offloads);
-		PMD_DRV_LOG(ERR, "\tqueue: %" PRIx64 "", rx_conf->offloads);
-		return -EINVAL;
-	}
-
 	/*
 	 * Free memory prior to re-allocation if needed. This is the case after
 	 * calling nfp_net_stop
@@ -1762,8 +1610,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	struct nfp_net_txq *txq;
 	uint16_t tx_free_thresh;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_txmode *txmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1777,15 +1623,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	txmode = &dev_conf->txmode;
-
-	if (tx_conf->offloads != txmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u tx offloads not as port offloads",
-				  queue_idx);
-		return -EINVAL;
-	}
-
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 				    tx_conf->tx_free_thresh :
 				    DEFAULT_TX_FREE_THRESH);
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 04120f5..4b14b8f 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -262,8 +262,6 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -285,38 +283,14 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	configured_offloads = rxmode->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
+		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	configured_offloads = txmode->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
+		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -738,14 +712,12 @@ octeontx_dev_tx_queue_release(void *tx_queue)
 static int
 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			    uint16_t nb_desc, unsigned int socket_id,
-			    const struct rte_eth_txconf *tx_conf)
+			    const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 	struct octeontx_txq *txq = NULL;
 	uint16_t dq_num;
 	int res = 0;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 	RTE_SET_USED(socket_id);
@@ -766,22 +738,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		dev->data->tx_queues[qidx] = NULL;
 	}
 
-	configured_offloads = tx_conf->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
-		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
 	/* Allocating tx queue data structure */
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
 				 RTE_CACHE_LINE_SIZE, nic->node);
@@ -837,8 +793,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint8_t gaura;
 	unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
 	unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 
@@ -861,22 +815,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	port = nic->port_id;
 
-	configured_offloads = rx_conf->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 	/* Rx deferred start is not supported */
 	if (rx_conf->rx_deferred_start) {
 		octeontx_log_err("rx deferred start not supported");
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index e42d553..fc2b254 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -413,14 +413,16 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
 		     rx_queue_id, nb_rx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
-			  rx_conf, mb_pool);
+			  rx_conf, mb_pool, offloads);
 	if (rc != 0)
 		goto fail_rx_qinit;
 
@@ -469,13 +471,16 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
 		     tx_queue_id, nb_tx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
-	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id,
+			  tx_conf, offloads);
 	if (rc != 0)
 		goto fail_tx_qinit;
 
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 57ed34f..dbdd000 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -830,32 +830,10 @@ sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 	}
 }
 
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
-			     sfc_rx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_rx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
 static int
 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
-		   const struct rte_eth_rxconf *rx_conf)
+		   const struct rte_eth_rxconf *rx_conf,
+		   uint64_t offloads)
 {
 	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
 				      sfc_rx_get_queue_offload_caps(sa);
@@ -880,17 +858,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
 		rc = EINVAL;
 	}
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+	if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
 	    DEV_RX_OFFLOAD_CHECKSUM)
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
 
 	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
 
-	if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -998,7 +973,8 @@ int
 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_rx_desc, unsigned int socket_id,
 	     const struct rte_eth_rxconf *rx_conf,
-	     struct rte_mempool *mb_pool)
+	     struct rte_mempool *mb_pool,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	struct sfc_rss *rss = &sa->rss;
@@ -1020,7 +996,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
 	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
 
-	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -1033,7 +1009,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	}
 
 	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
 			"object size is too small", sw_index);
 		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1056,7 +1032,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags =
-		(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 3fba7d8..2898fe5 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -138,7 +138,8 @@ void sfc_rx_stop(struct sfc_adapter *sa);
 int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
 		 uint16_t nb_rx_desc, unsigned int socket_id,
 		 const struct rte_eth_rxconf *rx_conf,
-		 struct rte_mempool *mb_pool);
+		 struct rte_mempool *mb_pool,
+		 uint64_t offloads);
 void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
 void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 1cd08d8..a4a21fa 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -90,31 +90,9 @@ sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 }
 
 static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
-			     sfc_tx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_tx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
-static int
 sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
-		   const struct rte_eth_txconf *tx_conf)
+		   const struct rte_eth_txconf *tx_conf,
+		   uint64_t offloads)
 {
 	int rc = 0;
 
@@ -138,15 +116,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
 
-	if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -160,7 +135,8 @@ sfc_tx_qflush_done(struct sfc_txq *txq)
 int
 sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
-	     const struct rte_eth_txconf *tx_conf)
+	     const struct rte_eth_txconf *tx_conf,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int txq_entries;
@@ -183,7 +159,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(txq_entries >= nb_tx_desc);
 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
 
-	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -210,7 +186,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
 		SFC_TX_DEFAULT_FREE_THRESH;
 	txq->flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
 			   socket_id, &txq->mem);
@@ -221,7 +197,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	info.max_fill_level = txq_max_fill_level;
 	info.free_thresh = txq->free_thresh;
 	info.flags = tx_conf->txq_flags;
-	info.offloads = tx_conf->offloads;
+	info.offloads = offloads;
 	info.txq_entries = txq_info->entries;
 	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
 	info.txq_hw_ring = txq->mem.esm_base;
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index c2e5f13..d2b2c4d 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -121,7 +121,8 @@ void sfc_tx_close(struct sfc_adapter *sa);
 
 int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		 uint16_t nb_tx_desc, unsigned int socket_id,
-		 const struct rte_eth_txconf *tx_conf);
+		 const struct rte_eth_txconf *tx_conf,
+		 uint64_t offloads);
 void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 
 void sfc_tx_qflush_done(struct sfc_txq *txq);
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 172a7ba..78fe89b 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -280,21 +280,6 @@ tap_rx_offload_get_queue_capa(void)
 	       DEV_RX_OFFLOAD_CRC_STRIP;
 }
 
-static bool
-tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 /* Callback to handle the rx burst of packets to the correct interface and
  * file descriptor(s) in a multi-queue setup.
  */
@@ -408,22 +393,6 @@ tap_tx_offload_get_queue_capa(void)
 	       DEV_TX_OFFLOAD_TCP_CKSUM;
 }
 
-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
 	       unsigned int l3_len)
@@ -668,18 +637,6 @@ tap_dev_stop(struct rte_eth_dev *dev)
 static int
 tap_dev_configure(struct rte_eth_dev *dev)
 {
-	uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa();
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, supp_tx_offloads);
-		return -rte_errno;
-	}
 	if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
 		TAP_LOG(ERR,
 			"%s: number of rx queues %d exceeds max num of queues %d",
@@ -1081,19 +1038,6 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}
 
-	/* Verify application offloads are valid for our port and queue. */
-	if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(tap_rx_offload_get_port_capa() |
-			 tap_rx_offload_get_queue_capa()));
-		return -rte_errno;
-	}
 	rxq->mp = mp;
 	rxq->trigger_seen = 1; /* force initial burst */
 	rxq->in_port = dev->data->port_id;
@@ -1157,35 +1101,19 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 	struct pmd_internals *internals = dev->data->dev_private;
 	struct tx_queue *txq;
 	int ret;
+	uint64_t offloads;
 
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -1;
 	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
 	txq = dev->data->tx_queues[tx_queue_id];
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
-		if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
-			txq->csum = !!(tx_conf->offloads &
-					(DEV_TX_OFFLOAD_IPV4_CKSUM |
-					 DEV_TX_OFFLOAD_UDP_CKSUM |
-					 DEV_TX_OFFLOAD_TCP_CKSUM));
-		} else {
-			rte_errno = ENOTSUP;
-			TAP_LOG(ERR,
-				"%p: Tx queue offloads 0x%" PRIx64
-				" don't match port offloads 0x%" PRIx64
-				" or supported offloads 0x%" PRIx64,
-				(void *)dev, tx_conf->offloads,
-				dev->data->dev_conf.txmode.offloads,
-				(tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa()));
-			return -rte_errno;
-		}
-	}
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->csum = !!(offloads &
+			(DEV_TX_OFFLOAD_IPV4_CKSUM |
+			 DEV_TX_OFFLOAD_UDP_CKSUM |
+			 DEV_TX_OFFLOAD_TCP_CKSUM));
+
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
 		return -1;
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index b673b47..23baa99 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -931,7 +931,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	bool is_single_pool;
 	struct nicvf_txq *txq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -945,17 +945,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-	conf_offloads = tx_conf->offloads;
-	offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	unsupported_offloads = conf_offloads & ~offload_capa;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Tx deferred start is not supported */
 	if (tx_conf->tx_deferred_start) {
 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,9 +996,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	txq->tx_free_thresh = tx_free_thresh;
 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
-	txq->offloads = conf_offloads;
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->offloads = offloads;
 
-	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1269,7 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint16_t rx_free_thresh;
 	struct nicvf_rxq *rxq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1283,24 +1273,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-
-	conf_offloads = rx_conf->offloads;
-
-	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	offload_capa = NICVF_RX_OFFLOAD_CAPA;
-	unsupported_offloads = conf_offloads & ~offload_capa;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Mempool memory must be contiguous, so must be one memory segment*/
 	if (mp->nb_mem_chunks != 1) {
 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1381,10 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_rx_queue_reset(rxq);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
-			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+			rte_mempool_avail_count(mp), rxq->phys, offloads);
 
 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1912,8 +1885,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint8_t cqcount;
-	uint64_t conf_rx_offloads, rx_offload_capa;
-	uint64_t conf_tx_offloads, tx_offload_capa;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1922,32 +1893,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
-	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_tx_offloads, tx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	conf_rx_offloads = rxmode->offloads;
-	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
-	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_rx_offloads, rx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
 		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
 		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a8aa87b..92fab21 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -385,10 +385,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
 			uint16_t nb_desc,
 			unsigned int socket_id __rte_unused,
-			const struct rte_eth_rxconf *rx_conf,
+			const struct rte_eth_rxconf *rx_conf __rte_unused,
 			struct rte_mempool *mp)
 {
-	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
@@ -408,10 +407,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			"Cannot allocate mbufs for rx virtqueue");
 	}
 
-	if ((rx_conf->offloads ^ rxmode->offloads) &
-	    VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS)
-		return -EINVAL;
-
 	dev->data->rx_queues[queue_idx] = rxvq;
 
 	return 0;
@@ -504,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
 	/* cannot use simple rxtx funcs with multisegs or offloads */
-	if (tx_conf->offloads)
+	if (dev->data->dev_conf.txmode.offloads)
 		hw->use_simple_tx = 0;
 
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index c850241..ba932ff 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -393,25 +393,9 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	const struct rte_memzone *mz;
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	size_t size;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & VMXNET3_RX_OFFLOAD_CAP) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested RX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			rx_offloads, (uint64_t)VMXNET3_RX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
-	if ((tx_offloads & VMXNET3_TX_OFFLOAD_CAP) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested TX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			tx_offloads, (uint64_t)VMXNET3_TX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
 		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index f6e2d98..cf85f3d 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1013,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			   uint16_t queue_idx,
 			   uint16_t nb_desc,
 			   unsigned int socket_id,
-			   const struct rte_eth_txconf *tx_conf)
+			   const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	const struct rte_memzone *mz;
@@ -1025,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
-	    ETH_TXQ_FLAGS_NOXSUMSCTP) {
-		PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (txq == NULL) {
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index e560524..ddfd020 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offloading must be within its device capabilities */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
+				"0x%" PRIx64 " doesn't match Rx offloads "
+				"capabilities 0x%" PRIx64 " in %s( )\n",
+				port_id,
+				local_conf.rxmode.offloads,
+				dev_info.rx_offload_capa,
+				__func__);
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
+				"0x%" PRIx64 " doesn't match Tx offloads "
+				"capabilities 0x%" PRIx64 " in %s( )\n",
+				port_id,
+				local_conf.txmode.offloads,
+				dev_info.tx_offload_capa,
+				__func__);
+	}
+
 	/* Check that device supports requested rss hash functions. */
 	if ((dev_info.flow_type_rss_offloads |
 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
@@ -1504,6 +1526,38 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
+				"added offloads 0x" PRIx64 " must be "
+				"within pre-queue offload capabilities 0x"
+				PRIx64 " in %s( )\n",
+				port_id,
+				rx_queue_id,
+				local_conf.offloads,
+				dev_info.rx_queue_offload_capa,
+				__func__);
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1612,6 +1666,38 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, new "
+				"added offloads 0x" PRIx64 " must be "
+				"within pre-queue offload capabilities 0x"
+				PRIx64 " in %s( )\n",
+				port_id,
+				tx_queue_id,
+				local_conf.offloads,
+				dev_info.tx_queue_offload_capa,
+				__func__);
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v10] ethdev: new Rx/Tx offloads API
  2018-05-10  0:49             ` [dpdk-dev] [PATCH v9] ethdev: new Rx/Tx offloads API Wei Dai
@ 2018-05-10  0:56               ` Wei Dai
  2018-05-10  1:28                 ` Ferruh Yigit
                                   ` (4 more replies)
  0 siblings, 5 replies; 60+ messages in thread
From: Wei Dai @ 2018-05-10  0:56 UTC (permalink / raw)
  To: ferruh.yigit, thomas; +Cc: dev, Wei Dai, Qi Zhang

This patch checks if an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If any offloading is enabled in rte_eth_dev_configure( ) by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
If a per-queue offloading hasn't been enabled in rte_eth_dev_configure( ),
it can be enabled or disabled for an individual queue in
rte_eth_[rt]x_queue_setup( ).
A new added offloading is one which hasn't been enabled in
rte_eth_dev_configure( ) and is requested to be enabled in
rte_eth_[rt]x_queue_setup( ); it must be of per-queue type,
otherwise trigger an error log.
The underlying PMD must be aware that the requested offloadings
to PMD specific queue_setup( ) function only carries those
new added offloadings of per-queue type.

This patch can make above such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

This patch assumes that all PMDs in 18.05-rc2 have already
converted to offload API defined in 17.11 . It also assumes
that all PMDs can return correct offloading capabilities
in rte_eth_dev_infos_get( ).

In the beginning of [rt]x_queue_setup( ) of underlying PMD,
add offloads = [rt]xconf->offloads |
dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
defined in 17.11 to avoid upper application broken due to offload
API change.
PMD can use the info that input [rt]xconf->offloads only carry
the new added per-queue offloads to do some optimization or some
code change on base of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

---
v10:
sorry, missed the code change; fix the building error

v9:
replace RTE_PMD_DEBUG_TRACE with ethdev_log(ERR, in ethdev
to avoid failure of application which hasn't been completely
converted to new offload API.

v8:
Revise PMD codes to comply with offload API in v7
update document

v7:
Give the maximum freedom for upper application,
only minimal checking is performed in ethdev layer.
Only requested specific pure per-queue offloadings are input
to underlying PMD.

v6:
No need enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to the decision on the offloading API in the community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 doc/guides/prog_guide/poll_mode_drv.rst |  26 +++--
 doc/guides/rel_notes/release_18_05.rst  |   8 ++
 drivers/net/avf/avf_rxtx.c              |   5 +-
 drivers/net/bnxt/bnxt_ethdev.c          |  17 ----
 drivers/net/cxgbe/cxgbe_ethdev.c        |  50 +---------
 drivers/net/dpaa/dpaa_ethdev.c          |  16 ----
 drivers/net/dpaa2/dpaa2_ethdev.c        |  16 ----
 drivers/net/e1000/em_ethdev.c           |  19 ----
 drivers/net/e1000/em_rxtx.c             |  64 ++-----------
 drivers/net/e1000/igb_rxtx.c            |  64 ++-----------
 drivers/net/ena/ena_ethdev.c            |  65 +------------
 drivers/net/failsafe/failsafe_ops.c     |  81 ----------------
 drivers/net/fm10k/fm10k_ethdev.c        |  82 ++--------------
 drivers/net/i40e/i40e_rxtx.c            |  58 ++----------
 drivers/net/ixgbe/ixgbe_ethdev.c        |  38 --------
 drivers/net/ixgbe/ixgbe_rxtx.c          |  66 ++-----------
 drivers/net/mlx4/mlx4_rxq.c             |  43 ++-------
 drivers/net/mlx4/mlx4_txq.c             |  42 ++------
 drivers/net/mlx5/mlx5_ethdev.c          |  22 -----
 drivers/net/mlx5/mlx5_rxq.c             |  50 ++--------
 drivers/net/mlx5/mlx5_txq.c             |  44 +--------
 drivers/net/mvpp2/mrvl_ethdev.c         |  97 +------------------
 drivers/net/nfp/nfp_net.c               | 163 --------------------------------
 drivers/net/octeontx/octeontx_ethdev.c  |  72 +-------------
 drivers/net/sfc/sfc_ethdev.c            |   9 +-
 drivers/net/sfc/sfc_rx.c                |  42 ++------
 drivers/net/sfc/sfc_rx.h                |   3 +-
 drivers/net/sfc/sfc_tx.c                |  42 ++------
 drivers/net/sfc/sfc_tx.h                |   3 +-
 drivers/net/tap/rte_eth_tap.c           |  88 ++---------------
 drivers/net/thunderx/nicvf_ethdev.c     |  70 ++------------
 drivers/net/virtio/virtio_rxtx.c        |   9 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c    |  16 ----
 drivers/net/vmxnet3/vmxnet3_rxtx.c      |   8 +-
 lib/librte_ethdev/rte_ethdev.c          |  86 +++++++++++++++++
 35 files changed, 238 insertions(+), 1346 deletions(-)

diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 09a93ba..56483fb 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -297,16 +297,30 @@ Per-Port and Per-Queue Offloads
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
+A per-queue offloading can be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offloading must be enabled or disabled on all queues at the same time.
+A per-port offloading can be enabled or disabled on all queues at the same time.
+It is certain that both per-queue and pure per-port offloading are per-port type.
 The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
+The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
+The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
-Per-port offload configuration is set using ``rte_eth_dev_configure``.
-Per-queue offload configuration is set using ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup``.
-To enable per-port offload, the offload should be set on both device configuration and queue setup.
-In case of a mixed configuration the queue setup shall return with an error.
-To enable per-queue offload, the offload can be set only on the queue setup.
-Offloads which are not enabled are disabled by default.
+Any requested offloading by application must be within the device capabilities.
+Any offloading is disabled by default if it is not set in the parameter
+dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure( )`` and
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
+If any offloading is enabled in ``rte_eth_dev_configure( )`` by application,
+it is enabled on all queues no matter whether it is per-queue or
+per-port type and no matter whether it is set or cleared in
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
+If a per-queue offloading hasn't been enabled in ``rte_eth_dev_configure( )``,
+it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup( )`` for individual queue.
+A new added offloads in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )`` input by application
+is the one which hasn't been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled
+in ``rte_eth_[rt]x_queue_setup( )``, it must be per-queue type, otherwise return error.
 
 For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in ``rte_eth_txconf`` struct.
 In such cases it is not required to set other flags in ``txq_flags``.
diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
index 0ae61e8..637e684 100644
--- a/doc/guides/rel_notes/release_18_05.rst
+++ b/doc/guides/rel_notes/release_18_05.rst
@@ -303,6 +303,14 @@ API Changes
   * ``rte_flow_create()`` API count action now requires the ``struct rte_flow_action_count``.
   * ``rte_flow_query()`` API parameter changed from action type to action structure.
 
+* **ethdev: changes to offload API**
+
+   A pure per-port offloading isn't requested to be repeated in [rt]x_conf->offloads to
+   ``rte_eth_[rt]x_queue_setup( )``. Now any offloading enabled in ``rte_eth_dev_configure( )``
+   can't be disabled by ``rte_eth_[rt]x_queue_setup( )``. Any new added offloading which has
+   not been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled in
+   ``rte_eth_[rt]x_queue_setup( )`` must be per-queue type, otherwise return error.
+
 
 ABI Changes
 -----------
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index 1824ed7..e03a136 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t i, base, bsf, tc_mapping;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
 	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
 	    nb_desc > AVF_MAX_RING_DESC ||
 	    nb_desc < AVF_MIN_RING_DESC) {
@@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->free_thresh = tx_free_thresh;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	/* Allocate software ring */
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 348129d..d00b99f 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -500,25 +500,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
-	uint64_t tx_offloads = eth_dev->data->dev_conf.txmode.offloads;
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (tx_offloads != (tx_offloads & BNXT_DEV_TX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Tx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			 tx_offloads, BNXT_DEV_TX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
-	if (rx_offloads != (rx_offloads & BNXT_DEV_RX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Rx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			    rx_offloads, BNXT_DEV_RX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
 
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 3df51b5..fadf684 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -366,31 +366,15 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
-	uint64_t unsupported_offloads, configured_offloads;
+	uint64_t configured_offloads;
 	int err;
 
 	CXGBE_FUNC_TRACE();
 	configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
-	configured_offloads = eth_dev->data->dev_conf.txmode.offloads;
-	unsupported_offloads = configured_offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
+		eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
@@ -440,7 +424,7 @@ int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_txconf *tx_conf)
+			     const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
@@ -448,15 +432,6 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
 	int err = 0;
 	unsigned int temp_nb_desc;
-	uint64_t unsupported_offloads;
-
-	unsupported_offloads = tx_conf->offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
 		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
@@ -553,7 +528,7 @@ int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_rxconf *rx_conf,
+			     const struct rte_eth_rxconf *rx_conf __rte_unused,
 			     struct rte_mempool *mp)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -565,21 +540,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	unsigned int temp_nb_desc;
 	struct rte_eth_dev_info dev_info;
 	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
-	uint64_t unsupported_offloads, configured_offloads;
-
-	configured_offloads = rx_conf->offloads;
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
 		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 6bf8c15..199afdd 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -176,14 +176,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -192,14 +184,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c304b82..de8d83a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -309,14 +309,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA2_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA2_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -325,14 +317,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA2_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA2_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 694a624..4e890ad 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -454,29 +454,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 
-	eth_em_infos_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 2b3c63e..a6b3e92 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1183,22 +1183,6 @@ em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return tx_queue_offload_capa;
 }
 
-static int
-em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1211,21 +1195,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t tsize;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			em_get_tx_port_offloads_capa(dev),
-			em_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -1330,7 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	return 0;
 }
 
@@ -1412,22 +1386,6 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1440,21 +1398,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	struct em_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	uint32_t rsize;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			em_get_rx_port_offloads_capa(dev),
-			em_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -1523,7 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a3776a0..128ed0b 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1475,22 +1475,6 @@ igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1502,19 +1486,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_tx_queue *txq;
 	struct e1000_hw     *hw;
 	uint32_t size;
+	uint64_t offloads;
 
-	if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			igb_get_tx_port_offloads_capa(dev),
-			igb_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1599,7 +1573,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	return 0;
 }
@@ -1690,22 +1664,6 @@ igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1718,19 +1676,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	unsigned int size;
+	uint64_t offloads;
 
-	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			igb_get_rx_port_offloads_capa(dev),
-			igb_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1756,7 +1704,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 41b5638..c595cc7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -238,10 +238,6 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -1005,12 +1001,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
-	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1065,7 +1055,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
@@ -1078,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      const struct rte_eth_rxconf *rx_conf,
+			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1114,11 +1104,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1422,22 +1407,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
-	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    tx_offloads, adapter->tx_supported_offloads);
-		return -ENOTSUP;
-	}
-
-	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    rx_offloads, adapter->rx_supported_offloads);
-		return -ENOTSUP;
-	}
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1459,8 +1428,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		break;
 	}
 
-	adapter->tx_selected_offloads = tx_offloads;
-	adapter->rx_selected_offloads = rx_offloads;
+	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
 	return 0;
 }
 
@@ -1489,32 +1458,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->tx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->rx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 6d44884..368d23f 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -90,22 +90,10 @@ static int
 fs_dev_configure(struct rte_eth_dev *dev)
 {
 	struct sub_device *sdev;
-	uint64_t supp_tx_offloads;
-	uint64_t tx_offloads;
 	uint8_t i;
 	int ret;
 
 	fs_lock(dev, 0);
-	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		ERROR("Some Tx offloads are not supported, "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-		      tx_offloads, supp_tx_offloads);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	FOREACH_SUBDEV(sdev, i, dev) {
 		int rmv_interrupt = 0;
 		int lsc_interrupt = 0;
@@ -297,25 +285,6 @@ fs_dev_close(struct rte_eth_dev *dev)
 	fs_unlock(dev, 0);
 }
 
-static bool
-fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.rxmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_rx_queue_release(void *queue)
 {
@@ -368,19 +337,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
 		fs_rx_queue_release(rxq);
 		dev->data->rx_queues[rx_queue_id] = NULL;
 	}
-	/* Verify application offloads are valid for our port and queue. */
-	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Rx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      rx_conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      PRIV(dev)->infos.rx_offload_capa |
-		      PRIV(dev)->infos.rx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	rxq = rte_zmalloc(NULL,
 			  sizeof(*rxq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -499,25 +455,6 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 	return rc;
 }
 
-static bool
-fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.txmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_tx_queue_release(void *queue)
 {
@@ -557,24 +494,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 		fs_tx_queue_release(txq);
 		dev->data->tx_queues[tx_queue_id] = NULL;
 	}
-	/*
-	 * Don't verify queue offloads for applications which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Tx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      tx_conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      PRIV(dev)->infos.tx_offload_capa |
-		      PRIV(dev)->infos.tx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	txq = rte_zmalloc("ethdev TX queue",
 			  sizeof(*txq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7dfeddf..7a59530 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -448,29 +448,13 @@ static int
 fm10k_dev_configure(struct rte_eth_dev *dev)
 {
 	int ret;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0)
+	if ((dev->data->dev_conf.rxmode.offloads &
+	     DEV_RX_OFFLOAD_CRC_STRIP) == 0)
 		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
 
-	fm10k_dev_infos_get(dev, &dev_info);
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* multipe queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
 	if (ret != 0) {
@@ -1827,22 +1811,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = fm10k_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
@@ -1852,20 +1820,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	struct fm10k_rx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_rx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			fm10k_get_rx_port_offloads_capa(dev),
-			fm10k_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/* make sure the mempool element size can account for alignment. */
 	if (!mempool_element_size_valid(mp)) {
@@ -1911,7 +1870,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->queue_id = queue_id;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	if (handle_rxconf(q, conf))
 		return -EINVAL;
 
@@ -2040,22 +1999,6 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = fm10k_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_txconf *conf)
@@ -2063,20 +2006,11 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_tx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_tx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			fm10k_get_tx_port_offloads_capa(dev),
-			fm10k_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* make sure a valid number of descriptors have been requested */
 	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
@@ -2115,7 +2049,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->port_id = dev->data->port_id;
 	q->queue_id = queue_id;
 	q->txq_flags = conf->txq_flags;
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	q->ops = &def_txq_ops;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 62985c3..05b4950 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1690,20 +1690,6 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 }
 
 static int
-i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
-	if ((requested & dev_info.rx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_first_queue(uint16_t idx, void **queues, int num)
 {
 	uint16_t i;
@@ -1792,18 +1778,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1857,7 +1834,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/* Allocate the maximun number of RX ring hardware descriptor. */
 	len = I40E_MAX_RING_DESC;
@@ -2075,20 +2052,6 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 static int
-i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
-	if ((requested & dev_info.tx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 				struct i40e_tx_queue *txq)
 {
@@ -2151,18 +2114,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t reg_idx, i, base, bsf, tc_mapping;
 	int q_offset;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			dev_info.tx_offload_capa);
-			return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2297,7 +2251,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->queue_id = queue_idx;
 	txq->reg_idx = reg_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 91179e9..320ab21 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2365,9 +2365,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2379,22 +2376,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	ixgbe_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -4965,29 +4946,10 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	ixgbevf_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2892436..7de6f00 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2448,22 +2448,6 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	return tx_offload_capa;
 }
 
-static int
-ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2475,25 +2459,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct ixgbe_tx_queue *txq;
 	struct ixgbe_hw     *hw;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			ixgbe_get_tx_queue_offloads(dev),
-			ixgbe_get_tx_port_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -2621,7 +2592,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -2915,22 +2886,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	return offloads;
 }
 
-static int
-ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2945,21 +2900,12 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			ixgbe_get_rx_port_offloads(dev),
-			ixgbe_get_rx_queue_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -2994,7 +2940,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 65f0994..35c44ff 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -693,26 +693,6 @@ mlx4_get_rx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = mlx4_get_rx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Rx queue.
  *
  * @param dev
@@ -754,20 +734,13 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	};
 	int ret;
 	uint32_t crc_present;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
-	(void)conf; /* Thresholds configuration (ignored). */
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      (mlx4_get_rx_port_offloads(priv) |
-		       mlx4_get_rx_queue_offloads(priv)));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_rx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -793,7 +766,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		     (void *)dev, idx, desc);
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		crc_present = 0;
 	} else if (priv->hw_fcs_strip) {
 		crc_present = 1;
@@ -825,9 +798,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -840,7 +813,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		uint32_t size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index fe6a8e0..2443333 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -180,26 +180,6 @@ mlx4_get_tx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = mlx4_get_tx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Tx queue.
  *
  * @param dev
@@ -246,23 +226,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		},
 	};
 	int ret;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      mlx4_get_tx_port_offloads(priv));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_tx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -313,11 +283,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
 					   DEV_TX_OFFLOAD_UDP_CKSUM |
 					   DEV_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads &
+			      (offloads &
 			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 746b94f..df369cd 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -330,30 +330,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	unsigned int reta_idx_n;
 	const uint8_t use_app_rss_key =
 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-	uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t supp_rx_offloads =
-		(mlx5_get_rx_port_offloads() |
-		 mlx5_get_rx_queue_offloads(dev));
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret = 0;
 
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Tx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, tx_offloads, supp_tx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
-	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Rx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, rx_offloads, supp_rx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (use_app_rss_key &&
 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
 	     rss_hash_default_key_len)) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 126412d..cea93cf 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -237,32 +237,6 @@ mlx5_get_rx_port_offloads(void)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
-	uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return 0;
-	if (((port_offloads ^ offloads) & port_supp_offloads))
-		return 0;
-	return 1;
-}
-
-/**
  *
  * @param dev
  *   Pointer to Ethernet device structure.
@@ -305,18 +279,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = EOVERFLOW;
 		return -rte_errno;
 	}
-	if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
-		DRV_LOG(ERR,
-			"port %u Rx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(mlx5_get_rx_port_offloads() |
-			 mlx5_get_rx_queue_offloads(dev)));
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (!mlx5_rxq_releasable(dev, idx)) {
 		DRV_LOG(ERR, "port %u unable to release queue index %u",
 			dev->data->port_id, idx);
@@ -980,6 +942,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 */
 	const uint16_t desc_n =
 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+	uint64_t offloads = conf->offloads |
+			   dev->data->dev_conf.rxmode.offloads;
 
 	tmpl = rte_calloc_socket("RXQ", 1,
 				 sizeof(*tmpl) +
@@ -997,7 +961,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		tmpl->rxq.sges_n = 0;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1044,12 +1008,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
-	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		tmpl->rxq.crc_present = 0;
 	} else if (config->hw_fcs_strip) {
 		tmpl->rxq.crc_present = 1;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4435874..fb7b4ad 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -127,31 +127,6 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
-
-	/* There are no Tx offloads which are per queue. */
-	if ((offloads & port_supp_offloads) != offloads)
-		return 0;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return 0;
-	return 1;
-}
-
-/**
  * DPDK callback to configure a TX queue.
  *
  * @param dev
@@ -177,22 +152,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		DRV_LOG(ERR,
-			"port %u Tx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			mlx5_get_tx_port_offloads(dev));
-		return -rte_errno;
-	}
 	if (desc <= MLX5_TX_COMP_THRESH) {
 		DRV_LOG(WARNING,
 			"port %u number of descriptors requested for Tx queue"
@@ -810,7 +769,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl->txq.offloads = conf->offloads;
+	tmpl->txq.offloads = conf->offloads |
+			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 05998bf..c9d85ca 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -318,26 +318,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.split_hdr_size) {
 		RTE_LOG(INFO, PMD, "Split headers not supported\n");
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
-		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
-		return -EINVAL;
-	}
-
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		RTE_LOG(INFO, PMD, "LRO not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 ETHER_HDR_LEN - ETHER_CRC_LEN;
@@ -1522,42 +1507,6 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
 }
 
 /**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = MRVL_RX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the receive queue.
  *
  * @param dev
@@ -1587,9 +1536,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	uint32_t min_size,
 		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	int ret, tc, inq;
+	uint64_t offloads;
 
-	if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
 		/*
@@ -1622,8 +1571,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled =
-		dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1686,42 +1634,6 @@ mrvl_rx_queue_release(void *rxq)
 }
 
 /**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = MRVL_TX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the transmit queue.
  *
  * @param dev
@@ -1746,9 +1658,6 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mrvl_priv *priv = dev->data->dev_private;
 	struct mrvl_txq *txq;
 
-	if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
-
 	if (dev->data->tx_queues[idx]) {
 		rte_free(dev->data->tx_queues[idx]);
 		dev->data->tx_queues[idx] = NULL;
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 048324e..d3b8ec0 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -412,148 +412,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX offloads */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
-		PMD_INIT_LOG(INFO, "rxmode does not support split header");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
-		PMD_INIT_LOG(INFO, "RXCSUM not supported");
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-		PMD_INIT_LOG(INFO, "VLAN filter not supported");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
-		PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		PMD_INIT_LOG(INFO, "LRO not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
-		PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
 	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
 		PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
 
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
-		PMD_INIT_LOG(INFO, "Scatter not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
-		PMD_INIT_LOG(INFO, "timestamp offfload not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "security offload not supported");
-		return -EINVAL;
-	}
-
-	/* checking TX offloads */
-	if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
-		PMD_INIT_LOG(INFO, "vlan insert offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
-		PMD_INIT_LOG(INFO, "TX checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) {
-		PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
-		PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
-		PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
-		PMD_INIT_LOG(INFO, "tunneling offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
-		PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
-		PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
-		PMD_INIT_LOG(INFO, "TX multisegs  not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
-		PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "TX security offload not supported");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -1600,8 +1461,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	const struct rte_memzone *tz;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_rxmode *rxmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1615,17 +1474,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	rxmode = &dev_conf->rxmode;
-
-	if (rx_conf->offloads != rxmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u rx offloads not as port offloads",
-				  queue_idx);
-		PMD_DRV_LOG(ERR, "\tport: %" PRIx64 "", rxmode->offloads);
-		PMD_DRV_LOG(ERR, "\tqueue: %" PRIx64 "", rx_conf->offloads);
-		return -EINVAL;
-	}
-
 	/*
 	 * Free memory prior to re-allocation if needed. This is the case after
 	 * calling nfp_net_stop
@@ -1762,8 +1610,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	struct nfp_net_txq *txq;
 	uint16_t tx_free_thresh;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_txmode *txmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1777,15 +1623,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	txmode = &dev_conf->txmode;
-
-	if (tx_conf->offloads != txmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u tx offloads not as port offloads",
-				  queue_idx);
-		return -EINVAL;
-	}
-
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 				    tx_conf->tx_free_thresh :
 				    DEFAULT_TX_FREE_THRESH);
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 04120f5..4b14b8f 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -262,8 +262,6 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -285,38 +283,14 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	configured_offloads = rxmode->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
+		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	configured_offloads = txmode->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
+		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -738,14 +712,12 @@ octeontx_dev_tx_queue_release(void *tx_queue)
 static int
 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			    uint16_t nb_desc, unsigned int socket_id,
-			    const struct rte_eth_txconf *tx_conf)
+			    const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 	struct octeontx_txq *txq = NULL;
 	uint16_t dq_num;
 	int res = 0;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 	RTE_SET_USED(socket_id);
@@ -766,22 +738,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		dev->data->tx_queues[qidx] = NULL;
 	}
 
-	configured_offloads = tx_conf->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
-		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
 	/* Allocating tx queue data structure */
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
 				 RTE_CACHE_LINE_SIZE, nic->node);
@@ -837,8 +793,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint8_t gaura;
 	unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
 	unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 
@@ -861,22 +815,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	port = nic->port_id;
 
-	configured_offloads = rx_conf->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 	/* Rx deferred start is not supported */
 	if (rx_conf->rx_deferred_start) {
 		octeontx_log_err("rx deferred start not supported");
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index e42d553..fc2b254 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -413,14 +413,16 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
 		     rx_queue_id, nb_rx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
-			  rx_conf, mb_pool);
+			  rx_conf, mb_pool, offloads);
 	if (rc != 0)
 		goto fail_rx_qinit;
 
@@ -469,13 +471,16 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
 		     tx_queue_id, nb_tx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
-	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id,
+			  tx_conf, offloads);
 	if (rc != 0)
 		goto fail_tx_qinit;
 
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 57ed34f..dbdd000 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -830,32 +830,10 @@ sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 	}
 }
 
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
-			     sfc_rx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_rx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
 static int
 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
-		   const struct rte_eth_rxconf *rx_conf)
+		   const struct rte_eth_rxconf *rx_conf,
+		   uint64_t offloads)
 {
 	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
 				      sfc_rx_get_queue_offload_caps(sa);
@@ -880,17 +858,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
 		rc = EINVAL;
 	}
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+	if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
 	    DEV_RX_OFFLOAD_CHECKSUM)
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
 
 	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
 
-	if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -998,7 +973,8 @@ int
 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_rx_desc, unsigned int socket_id,
 	     const struct rte_eth_rxconf *rx_conf,
-	     struct rte_mempool *mb_pool)
+	     struct rte_mempool *mb_pool,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	struct sfc_rss *rss = &sa->rss;
@@ -1020,7 +996,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
 	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
 
-	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -1033,7 +1009,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	}
 
 	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
 			"object size is too small", sw_index);
 		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1056,7 +1032,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags =
-		(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 3fba7d8..2898fe5 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -138,7 +138,8 @@ void sfc_rx_stop(struct sfc_adapter *sa);
 int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
 		 uint16_t nb_rx_desc, unsigned int socket_id,
 		 const struct rte_eth_rxconf *rx_conf,
-		 struct rte_mempool *mb_pool);
+		 struct rte_mempool *mb_pool,
+		 uint64_t offloads);
 void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
 void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 1cd08d8..a4a21fa 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -90,31 +90,9 @@ sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 }
 
 static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
-			     sfc_tx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_tx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
-static int
 sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
-		   const struct rte_eth_txconf *tx_conf)
+		   const struct rte_eth_txconf *tx_conf,
+		   uint64_t offloads)
 {
 	int rc = 0;
 
@@ -138,15 +116,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
 
-	if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -160,7 +135,8 @@ sfc_tx_qflush_done(struct sfc_txq *txq)
 int
 sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
-	     const struct rte_eth_txconf *tx_conf)
+	     const struct rte_eth_txconf *tx_conf,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int txq_entries;
@@ -183,7 +159,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(txq_entries >= nb_tx_desc);
 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
 
-	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -210,7 +186,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
 		SFC_TX_DEFAULT_FREE_THRESH;
 	txq->flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
 			   socket_id, &txq->mem);
@@ -221,7 +197,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	info.max_fill_level = txq_max_fill_level;
 	info.free_thresh = txq->free_thresh;
 	info.flags = tx_conf->txq_flags;
-	info.offloads = tx_conf->offloads;
+	info.offloads = offloads;
 	info.txq_entries = txq_info->entries;
 	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
 	info.txq_hw_ring = txq->mem.esm_base;
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index c2e5f13..d2b2c4d 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -121,7 +121,8 @@ void sfc_tx_close(struct sfc_adapter *sa);
 
 int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		 uint16_t nb_tx_desc, unsigned int socket_id,
-		 const struct rte_eth_txconf *tx_conf);
+		 const struct rte_eth_txconf *tx_conf,
+		 uint64_t offloads);
 void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 
 void sfc_tx_qflush_done(struct sfc_txq *txq);
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 172a7ba..78fe89b 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -280,21 +280,6 @@ tap_rx_offload_get_queue_capa(void)
 	       DEV_RX_OFFLOAD_CRC_STRIP;
 }
 
-static bool
-tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 /* Callback to handle the rx burst of packets to the correct interface and
  * file descriptor(s) in a multi-queue setup.
  */
@@ -408,22 +393,6 @@ tap_tx_offload_get_queue_capa(void)
 	       DEV_TX_OFFLOAD_TCP_CKSUM;
 }
 
-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
 	       unsigned int l3_len)
@@ -668,18 +637,6 @@ tap_dev_stop(struct rte_eth_dev *dev)
 static int
 tap_dev_configure(struct rte_eth_dev *dev)
 {
-	uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa();
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, supp_tx_offloads);
-		return -rte_errno;
-	}
 	if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
 		TAP_LOG(ERR,
 			"%s: number of rx queues %d exceeds max num of queues %d",
@@ -1081,19 +1038,6 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}
 
-	/* Verify application offloads are valid for our port and queue. */
-	if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(tap_rx_offload_get_port_capa() |
-			 tap_rx_offload_get_queue_capa()));
-		return -rte_errno;
-	}
 	rxq->mp = mp;
 	rxq->trigger_seen = 1; /* force initial burst */
 	rxq->in_port = dev->data->port_id;
@@ -1157,35 +1101,19 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 	struct pmd_internals *internals = dev->data->dev_private;
 	struct tx_queue *txq;
 	int ret;
+	uint64_t offloads;
 
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -1;
 	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
 	txq = dev->data->tx_queues[tx_queue_id];
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
-		if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
-			txq->csum = !!(tx_conf->offloads &
-					(DEV_TX_OFFLOAD_IPV4_CKSUM |
-					 DEV_TX_OFFLOAD_UDP_CKSUM |
-					 DEV_TX_OFFLOAD_TCP_CKSUM));
-		} else {
-			rte_errno = ENOTSUP;
-			TAP_LOG(ERR,
-				"%p: Tx queue offloads 0x%" PRIx64
-				" don't match port offloads 0x%" PRIx64
-				" or supported offloads 0x%" PRIx64,
-				(void *)dev, tx_conf->offloads,
-				dev->data->dev_conf.txmode.offloads,
-				(tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa()));
-			return -rte_errno;
-		}
-	}
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->csum = !!(offloads &
+			(DEV_TX_OFFLOAD_IPV4_CKSUM |
+			 DEV_TX_OFFLOAD_UDP_CKSUM |
+			 DEV_TX_OFFLOAD_TCP_CKSUM));
+
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
 		return -1;
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index b673b47..23baa99 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -931,7 +931,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	bool is_single_pool;
 	struct nicvf_txq *txq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -945,17 +945,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-	conf_offloads = tx_conf->offloads;
-	offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	unsupported_offloads = conf_offloads & ~offload_capa;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Tx deferred start is not supported */
 	if (tx_conf->tx_deferred_start) {
 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,9 +996,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	txq->tx_free_thresh = tx_free_thresh;
 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
-	txq->offloads = conf_offloads;
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->offloads = offloads;
 
-	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1269,7 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint16_t rx_free_thresh;
 	struct nicvf_rxq *rxq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1283,24 +1273,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-
-	conf_offloads = rx_conf->offloads;
-
-	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	offload_capa = NICVF_RX_OFFLOAD_CAPA;
-	unsupported_offloads = conf_offloads & ~offload_capa;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Mempool memory must be contiguous, so must be one memory segment*/
 	if (mp->nb_mem_chunks != 1) {
 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1381,10 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_rx_queue_reset(rxq);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
-			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+			rte_mempool_avail_count(mp), rxq->phys, offloads);
 
 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1912,8 +1885,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint8_t cqcount;
-	uint64_t conf_rx_offloads, rx_offload_capa;
-	uint64_t conf_tx_offloads, tx_offload_capa;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1922,32 +1893,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
-	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_tx_offloads, tx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	conf_rx_offloads = rxmode->offloads;
-	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
-	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_rx_offloads, rx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
 		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
 		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a8aa87b..92fab21 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -385,10 +385,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
 			uint16_t nb_desc,
 			unsigned int socket_id __rte_unused,
-			const struct rte_eth_rxconf *rx_conf,
+			const struct rte_eth_rxconf *rx_conf __rte_unused,
 			struct rte_mempool *mp)
 {
-	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
@@ -408,10 +407,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			"Cannot allocate mbufs for rx virtqueue");
 	}
 
-	if ((rx_conf->offloads ^ rxmode->offloads) &
-	    VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS)
-		return -EINVAL;
-
 	dev->data->rx_queues[queue_idx] = rxvq;
 
 	return 0;
@@ -504,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
 	/* cannot use simple rxtx funcs with multisegs or offloads */
-	if (tx_conf->offloads)
+	if (dev->data->dev_conf.txmode.offloads)
 		hw->use_simple_tx = 0;
 
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index c850241..ba932ff 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -393,25 +393,9 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	const struct rte_memzone *mz;
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	size_t size;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & VMXNET3_RX_OFFLOAD_CAP) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested RX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			rx_offloads, (uint64_t)VMXNET3_RX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
-	if ((tx_offloads & VMXNET3_TX_OFFLOAD_CAP) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested TX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			tx_offloads, (uint64_t)VMXNET3_TX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
 		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index f6e2d98..cf85f3d 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1013,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			   uint16_t queue_idx,
 			   uint16_t nb_desc,
 			   unsigned int socket_id,
-			   const struct rte_eth_txconf *tx_conf)
+			   const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	const struct rte_memzone *mz;
@@ -1025,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
-	    ETH_TXQ_FLAGS_NOXSUMSCTP) {
-		PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (txq == NULL) {
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index e560524..5baa2aa 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offloading must be within its device capabilities */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
+				"0x%" PRIx64 " doesn't match Rx offloads "
+				"capabilities 0x%" PRIx64 " in %s( )\n",
+				port_id,
+				local_conf.rxmode.offloads,
+				dev_info.rx_offload_capa,
+				__func__);
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
+				"0x%" PRIx64 " doesn't match Tx offloads "
+				"capabilities 0x%" PRIx64 " in %s( )\n",
+				port_id,
+				local_conf.txmode.offloads,
+				dev_info.tx_offload_capa,
+				__func__);
+	}
+
 	/* Check that device supports requested rss hash functions. */
 	if ((dev_info.flow_type_rss_offloads |
 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
@@ -1504,6 +1526,38 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
+				"added offloads 0x%" PRIx64 " must be "
+				"within pre-queue offload capabilities 0x%"
+				PRIx64 " in %s( )\n",
+				port_id,
+				rx_queue_id,
+				local_conf.offloads,
+				dev_info.rx_queue_offload_capa,
+				__func__);
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1612,6 +1666,38 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, new "
+				"added offloads 0x%" PRIx64 " must be "
+				"within pre-queue offload capabilities 0x%"
+				PRIx64 " in %s( )\n",
+				port_id,
+				tx_queue_id,
+				local_conf.offloads,
+				dev_info.tx_queue_offload_capa,
+				__func__);
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v10] ethdev: new Rx/Tx offloads API
  2018-05-10  0:56               ` [dpdk-dev] [PATCH v10] " Wei Dai
@ 2018-05-10  1:28                 ` Ferruh Yigit
  2018-05-10  2:35                 ` Thomas Monjalon
                                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-10  1:28 UTC (permalink / raw)
  To: Wei Dai, thomas; +Cc: dev, Qi Zhang

On 5/10/2018 1:56 AM, Wei Dai wrote:
> This patch checks if an input requested offloading is valid or not.
> Any requested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If any offloading is enabled in rte_eth_dev_configure( ) by application,
> it is enabled on all queues no matter whether it is per-queue or
> per-port type and no matter whether it is set or cleared in
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If a per-queue offloading hasn't been enabled in rte_eth_dev_configure( ),
> it can be enabled or disabled for individual queue in
> rte_eth_[rt]x_queue_setup( ).
> A newly added offloading is one which hasn't been enabled in
> rte_eth_dev_configure( ) and is requested to be enabled in
> rte_eth_[rt]x_queue_setup( ); it must be per-queue type,
> otherwise trigger an error log.
> The underlying PMD must be aware that the requested offloadings
> to PMD specific queue_setup( ) function only carries those
> new added offloadings of per-queue type.
> 
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
> 
> This patch assumes that all PMDs in 18.05-rc2 have already
> converted to offload API defined in 17.11 . It also assumes
> that all PMDs can return correct offloading capabilities
> in rte_eth_dev_infos_get( ).
> 
> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
> add offloads = [rt]xconf->offloads |
> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> defined in 17.11 to avoid upper application broken due to offload
> API change.
> PMD can use the info that input [rt]xconf->offloads only carry
> the new added per-queue offloads to do some optimization or some
> code change on base of this patch.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net/master, thanks.


Hi Thomas,

I remember you mentioned you will have some comments on doxygen. I took the
patch into next-net so that it can be tested; we can squash doc updates later.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v10] ethdev: new Rx/Tx offloads API
  2018-05-10  0:56               ` [dpdk-dev] [PATCH v10] " Wei Dai
  2018-05-10  1:28                 ` Ferruh Yigit
@ 2018-05-10  2:35                 ` Thomas Monjalon
  2018-05-10 11:27                   ` Dai, Wei
  2018-05-10  9:25                 ` Andrew Rybchenko
                                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 60+ messages in thread
From: Thomas Monjalon @ 2018-05-10  2:35 UTC (permalink / raw)
  To: Wei Dai; +Cc: dev, ferruh.yigit, Qi Zhang

Hi,

I am checking if this patch comply with goals discussed in the survey:
	http://dpdk.org/ml/archives/dev/2018-March/094459.html

- Allow "forgetting" port offloads in queue offloads setup.

- An offload enabled at port level, cannot be disabled at queue level.

- Every queue capabilities must be reported as port capabilities.

- A capability should be reported at queue level
  only if it can be enabled on queue when it is disabled on port level.

I think some items must be updated in doxygen comments of rte_ethdev.h.
Please could you try to do a v11 for doxygen? I will review it quickly.

Examples:

	- in queue offloads:
		"No need to repeat flags already enabled at port level.
		 A flag enabled at port level, cannot be disabled at queue level."

	- in port capabilities: "(include per-queue capabilities)"

More comments below, thanks.


10/05/2018 02:56, Wei Dai:
> This patch checks if an input requested offloading is valid or not.
> Any requested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If any offloading is enabled in rte_eth_dev_configure( ) by application,
> it is enabled on all queues no matter whether it is per-queue or
> per-port type and no matter whether it is set or cleared in
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If a per-queue offloading hasn't been enabled in rte_eth_dev_configure( ),
> it can be enabled or disabled for individual queue in
> rte_eth_[rt]x_queue_setup( ).
> A newly added offloading is one which hasn't been enabled in
> rte_eth_dev_configure( ) and is requested to be enabled in
> rte_eth_[rt]x_queue_setup( ); it must be per-queue type,
> otherwise trigger an error log.
> The underlying PMD must be aware that the requested offloadings
> to PMD specific queue_setup( ) function only carries those
> new added offloadings of per-queue type.

Good summary.
Please drop the whitespace inside the parentheses.

> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.

Good

> --- a/doc/guides/prog_guide/poll_mode_drv.rst
> +++ b/doc/guides/prog_guide/poll_mode_drv.rst
> @@ -297,16 +297,30 @@ Per-Port and Per-Queue Offloads
>  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>  
>  In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
> +A per-queue offloading can be enabled on a queue and disabled on another queue at the same time.
> +A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
> +A pure per-port offloading must be enabled or disabled on all queues at the same time.
> +A per-port offloading can be enabled or disabled on all queues at the same time.

What is the difference between pure per-port and per-port here?

> +It is certain that both per-queue and pure per-port offloading are per-port type.

I don't understand this sentence.

>  The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
> +The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
> +The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.

Yes

> +Any requested offloading by application must be within the device capabilities.

Yes

> +Any offloading is disabled by default if it is not set in the parameter

Yes

> +dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure( )`` and
> +[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
> +If any offloading is enabled in ``rte_eth_dev_configure( )`` by application,
> +it is enabled on all queues no matter whether it is per-queue or
> +per-port type and no matter whether it is set or cleared in
> +[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
> +If a per-queue offloading hasn't been enabled in ``rte_eth_dev_configure( )``,
> +it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup( )`` for individual queue.

Yes

> +A new added offloads in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )`` input by application
> +is the one which hasn't been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled
> +in ``rte_eth_[rt]x_queue_setup( )``, it must be per-queue type, otherwise return error.

Yes


> --- a/doc/guides/rel_notes/release_18_05.rst
> +++ b/doc/guides/rel_notes/release_18_05.rst
> +* **ethdev: changes to offload API**

No need of bold formatting of title in API changes.

> +
> +   A pure per-port offloading isn't requested to be repeated in [rt]x_conf->offloads to
> +   ``rte_eth_[rt]x_queue_setup( )``. Now any offloading enabled in ``rte_eth_dev_configure( )``
> +   can't be disabled by ``rte_eth_[rt]x_queue_setup( )``. Any new added offloading which has
> +   not been enabled in ``rte_eth_dev_configure( )`` and is requested to be enabled in
> +   ``rte_eth_[rt]x_queue_setup( )`` must be per-queue type, otherwise return error.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v10] ethdev: new Rx/Tx offloads API
  2018-05-10  0:56               ` [dpdk-dev] [PATCH v10] " Wei Dai
  2018-05-10  1:28                 ` Ferruh Yigit
  2018-05-10  2:35                 ` Thomas Monjalon
@ 2018-05-10  9:25                 ` Andrew Rybchenko
  2018-05-10 19:47                   ` Ferruh Yigit
  2018-05-10 11:30                 ` [dpdk-dev] [PATCH v11] " Wei Dai
  2018-05-10 21:08                 ` [dpdk-dev] [PATCH v10] " Ferruh Yigit
  4 siblings, 1 reply; 60+ messages in thread
From: Andrew Rybchenko @ 2018-05-10  9:25 UTC (permalink / raw)
  To: Wei Dai, ferruh.yigit, thomas; +Cc: dev, Qi Zhang

On 05/10/2018 03:56 AM, Wei Dai wrote:
> This patch check if a input requested offloading is valid or not.
> Any reuqested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If any offloading is enabled in rte_eth_dev_configure( ) by application,
> it is enabled on all queues no matter whether it is per-queue or
> per-port type and no matter whether it is set or cleared in
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If a per-queue offloading hasn't be enabled in rte_eth_dev_configure( ),
> it can be enabled or disabled for individual queue in
> ret_eth_[rt]x_queue_setup( ).
> A new added offloading is the one which hasn't been enabled in
> rte_eth_dev_configure( ) and is reuqested to be enabled in
> rte_eth_[rt]x_queue_setup( ), it must be per-queue type,
> otherwise triger an error log.
> The underlying PMD must be aware that the requested offloadings
> to PMD specific queue_setup( ) function only carries those
> new added offloadings of per-queue type.
>
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
>
> This patch assumes that all PMDs in 18.05-rc2 have already
> converted to offload API defined in 17.11 . It also assumes
> that all PMDs can return correct offloading capabilities
> in rte_eth_dev_infos_get( ).
>
> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
> add offloads = [rt]xconf->offloads |
> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> defined in 17.11 to avoid upper application broken due to offload
> API change.
> PMD can use the info that input [rt]xconf->offloads only carry
> the new added per-queue offloads to do some optimization or some
> code change on base of this patch.
>
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
>
> ---
> v10:
> sorry, miss the code change, fix the buidling error
>
> v9:
> replace RTE_PMD_DEBUG_TRACE with ethdev_log(ERR, in ethdev
> to avoid failure of application which hasn't been completely
> converted to new offload API.

[...]

> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
> index e560524..5baa2aa 100644
> --- a/lib/librte_ethdev/rte_ethdev.c
> +++ b/lib/librte_ethdev/rte_ethdev.c
> @@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   							ETHER_MAX_LEN;
>   	}
>   
> +	/* Any requested offloading must be within its device capabilities */
> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> +	     local_conf.rxmode.offloads) {
> +		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
> +				"0x%" PRIx64 " doesn't match Rx offloads "
> +				"capabilities 0x%" PRIx64 " in %s( )\n",
> +				port_id,
> +				local_conf.rxmode.offloads,
> +				dev_info.rx_offload_capa,
> +				__func__);

Why is return -EINVAL removed here?
If application is not updated to use offloads, offloads is 0 and 
everything is OK.
If application is updated to use offloads, its behaviour must be consistent.
Same below for Tx device offloads.

> +	}
> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> +	     local_conf.txmode.offloads) {
> +		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
> +				"0x%" PRIx64 " doesn't match Tx offloads "
> +				"capabilities 0x%" PRIx64 " in %s( )\n",
> +				port_id,
> +				local_conf.txmode.offloads,
> +				dev_info.tx_offload_capa,
> +				__func__);
> +	}
> +
>   	/* Check that device supports requested rss hash functions. */
>   	if ((dev_info.flow_type_rss_offloads |
>   	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
> @@ -1504,6 +1526,38 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>   						    &local_conf.offloads);
>   	}
>   
> +	/*
> +	 * If an offloading has already been enabled in
> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> +	 * so there is no need to enable it in this queue again.
> +	 * The local_conf.offloads input to underlying PMD only carries
> +	 * those offloadings which are only enabled on this queue and
> +	 * not enabled on all queues.
> +	 * The underlying PMD must be aware of this point.
> +	 */
> +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
> +
> +	/*
> +	 * New added offloadings for this queue are those not enabled in
> +	 * rte_eth_dev_configure( ) and they must be per-queue type.
> +	 * A pure per-port offloading can't be enabled on a queue while
> +	 * disabled on another queue. A pure per-port offloading can't
> +	 * be enabled for any queue as new added one if it hasn't been
> +	 * enabled in rte_eth_dev_configure( ).
> +	 */
> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> +	     local_conf.offloads) {
> +		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
> +				"added offloads 0x%" PRIx64 " must be "
> +				"within pre-queue offload capabilities 0x%"
> +				PRIx64 " in %s( )\n",
> +				port_id,
> +				rx_queue_id,
> +				local_conf.offloads,
> +				dev_info.rx_queue_offload_capa,
> +				__func__);

May be it is really a good tradeoff to remove error return here.
Ideally it would be nice to see explanation here why.

> +	}
> +
>   	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
>   					      socket_id, &local_conf, mp);
>   	if (!ret) {
> @@ -1612,6 +1666,38 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
>   					  &local_conf.offloads);
>   	}
>   
> +	/*
> +	 * If an offloading has already been enabled in
> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> +	 * so there is no need to enable it in this queue again.
> +	 * The local_conf.offloads input to underlying PMD only carries
> +	 * those offloadings which are only enabled on this queue and
> +	 * not enabled on all queues.
> +	 * The underlying PMD must be aware of this point.
> +	 */
> +	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
> +
> +	/*
> +	 * New added offloadings for this queue are those not enabled in
> +	 * rte_eth_dev_configure( ) and they must be per-queue type.
> +	 * A pure per-port offloading can't be enabled on a queue while
> +	 * disabled on another queue. A pure per-port offloading can't
> +	 * be enabled for any queue as new added one if it hasn't been
> +	 * enabled in rte_eth_dev_configure( ).
> +	 */
> +	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
> +	     local_conf.offloads) {
> +		ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, new "
> +				"added offloads 0x%" PRIx64 " must be "
> +				"within pre-queue offload capabilities 0x%"
> +				PRIx64 " in %s( )\n",
> +				port_id,
> +				tx_queue_id,
> +				local_conf.offloads,
> +				dev_info.tx_queue_offload_capa,
> +				__func__);
> +	}
> +
>   	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
>   		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
>   }

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v10] ethdev: new Rx/Tx offloads API
  2018-05-10  2:35                 ` Thomas Monjalon
@ 2018-05-10 11:27                   ` Dai, Wei
  0 siblings, 0 replies; 60+ messages in thread
From: Dai, Wei @ 2018-05-10 11:27 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: dev, Yigit, Ferruh, Zhang, Qi Z

Hi, Thomas
Thanks for your feedback and guidance.

> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Thursday, May 10, 2018 10:36 AM
> To: Dai, Wei <wei.dai@intel.com>
> Cc: dev@dpdk.org; Yigit, Ferruh <ferruh.yigit@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v10] ethdev: new Rx/Tx offloads API
> 
> Hi,
> 
> I am checking if this patch comply with goals discussed in the survey:
> 	http://dpdk.org/ml/archives/dev/2018-March/094459.html
> 
> - Allow "forgetting" port offloads in queue offloads setup.
> 
> - An offload enabled at port level, cannot be disabled at queue level.
> 
> - Every queue capabilities must be reported as port capabilities.
> 
> - A capability should be reported at queue level
>   only if it can be enabled on queue when it is disabled on port level.
> 
> I think some items must be updated in doxygen comments of rte_ethdev.h.
> Please could you try to do a v11 for doxygen? I will review it quickly.
> 
> Examples:
> 
> 	- in queue offloads:
> 		"No need to repeat flags already enabled at port level.
> 		 A flag enabled at port level, cannot be disabled at queue level."
> 
> 	- in port capabilities: "(include per-queue capabilities)"
> 
> More comments below, thanks.
> 
> 
> 10/05/2018 02:56, Wei Dai:
> > This patch check if a input requested offloading is valid or not.
> > Any reuqested offloading must be supported in the device capabilities.
> > Any offloading is disabled by default if it is not set in the
> > parameter dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> > [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> > If any offloading is enabled in rte_eth_dev_configure( ) by
> > application, it is enabled on all queues no matter whether it is
> > per-queue or per-port type and no matter whether it is set or cleared
> > in [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> > If a per-queue offloading hasn't be enabled in rte_eth_dev_configure(
> > ), it can be enabled or disabled for individual queue in
> > ret_eth_[rt]x_queue_setup( ).
> > A new added offloading is the one which hasn't been enabled in
> > rte_eth_dev_configure( ) and is reuqested to be enabled in
> > rte_eth_[rt]x_queue_setup( ), it must be per-queue type, otherwise
> > triger an error log.
> > The underlying PMD must be aware that the requested offloadings to PMD
> > specific queue_setup( ) function only carries those new added
> > offloadings of per-queue type.
> 
> Good summary.
> Please forget the whitespace inside the parens.
I will remove whitespaces inside the parens.
> 
> > This patch can make above such checking in a common way in rte_ethdev
> > layer to avoid same checking in underlying PMD.
> 
> Good
> 
> > --- a/doc/guides/prog_guide/poll_mode_drv.rst
> > +++ b/doc/guides/prog_guide/poll_mode_drv.rst
> > @@ -297,16 +297,30 @@ Per-Port and Per-Queue Offloads
> > ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> >
> >  In the DPDK offload API, offloads are divided into per-port and per-queue
> offloads.
> > +A per-queue offloading can be enabled on a queue and disabled on
> another queue at the same time.
> > +A pure per-port offloading can't be enabled on a queue and disabled on
> another queue at the same time.
> > +A pure per-port offloading must be enabled or disabled on all queues at
> the same time.
> > +A per-port offloading can be enabled or disabled on all queues at the
> same time.
> 
> What is the difference between pure per-port and per-port here?
> 
Can I add the following words to explain more:
Pure per-port offloads are those supported by the device but not of per-queue type.
A supported offload can be per-queue or pure per-port, but can't be both
types on the same device.
Any offloading is per-port type, as it can be enabled or disabled on all queues
at the same time.

> > +It is certain that both per-queue and pure per-port offloading are per-port
> type.
> 
> I don't understand this sentence.
> 
Explained above, I just want to divide all offloading into two distinct types: 
Per-queue and pure per-port.
And per-queue + pure per-port = per-port.


> >  The different offloads capabilities can be queried using
> ``rte_eth_dev_info_get()``.
> > +The dev_info->[rt]x_queue_offload_capa returned from
> ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
> > +The dev_info->[rt]x_offload_capa returned from
> ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading
> capabilities.
> 
> Yes
> 
> > +Any requested offloading by application must be within the device
> capabilities.
> 
> Yes
> 
> > +Any offloading is disabled by default if it is not set in the
> > +parameter
> 
> Yes
> 
> > +dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure( )`` and
> > +[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
> > +If any offloading is enabled in ``rte_eth_dev_configure( )`` by
> > +application, it is enabled on all queues no matter whether it is
> > +per-queue or per-port type and no matter whether it is set or cleared
> > +in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup( )``.
> > +If a per-queue offloading hasn't been enabled in
> > +``rte_eth_dev_configure( )``, it can be enabled or disabled in
> ``rte_eth_[rt]x_queue_setup( )`` for individual queue.
> 
> Yes
> 
> > +A new added offloads in [rt]x_conf->offloads to
> > +``rte_eth_[rt]x_queue_setup( )`` input by application is the one
> > +which hasn't been enabled in ``rte_eth_dev_configure( )`` and is requested
> to be enabled in ``rte_eth_[rt]x_queue_setup( )``, it must be per-queue type,
> otherwise return error.
> 
> Yes
> 
> 
> > --- a/doc/guides/rel_notes/release_18_05.rst
> > +++ b/doc/guides/rel_notes/release_18_05.rst
> > +* **ethdev: changes to offload API**
> 
> No need of bold formatting of title in API changes.
> 
Will revise it as you guide.

> > +
> > +   A pure per-port offloading isn't requested to be repeated in
> [rt]x_conf->offloads to
> > +   ``rte_eth_[rt]x_queue_setup( )``. Now any offloading enabled in
> ``rte_eth_dev_configure( )``
> > +   can't be disabled by ``rte_eth_[rt]x_queue_setup( )``. Any new added
> offloading which has
> > +   not been enabled in ``rte_eth_dev_configure( )`` and is requested to
> be enabled in
> > +   ``rte_eth_[rt]x_queue_setup( )`` must be per-queue type, otherwise
> return error.
> 
> 
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v11] ethdev: new Rx/Tx offloads API
  2018-05-10  0:56               ` [dpdk-dev] [PATCH v10] " Wei Dai
                                   ` (2 preceding siblings ...)
  2018-05-10  9:25                 ` Andrew Rybchenko
@ 2018-05-10 11:30                 ` Wei Dai
  2018-05-10 11:56                   ` [dpdk-dev] [PATCH v12] " Wei Dai
  2018-05-10 21:08                 ` [dpdk-dev] [PATCH v10] " Ferruh Yigit
  4 siblings, 1 reply; 60+ messages in thread
From: Wei Dai @ 2018-05-10 11:30 UTC (permalink / raw)
  To: thomas, ferruh.yigit; +Cc: dev, Wei Dai, Qi Zhang

This patch checks if an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure() and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
If any offloading is enabled in rte_eth_dev_configure() by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
If a per-queue offloading hasn't been enabled in rte_eth_dev_configure(),
it can be enabled or disabled for an individual queue in
rte_eth_[rt]x_queue_setup().
A newly added offloading is one which hasn't been enabled in
rte_eth_dev_configure() and is requested to be enabled in
rte_eth_[rt]x_queue_setup(); it must be per-queue type,
otherwise trigger an error log.
The underlying PMD must be aware that the requested offloadings
passed to the PMD specific queue_setup() function only carry those
newly added offloadings of per-queue type.

This patch performs all of the above checking in a common way in the
rte_ethdev layer to avoid duplicating the same checks in underlying PMDs.

This patch assumes that all PMDs in 18.05-rc2 have already
converted to offload API defined in 17.11 . It also assumes
that all PMDs can return correct offloading capabilities
in rte_eth_dev_infos_get().

In the beginning of [rt]x_queue_setup() of underlying PMD,
add offloads = [rt]xconf->offloads |
dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
defined in 17.11 to avoid upper application broken due to offload
API change.
PMD can use the info that input [rt]xconf->offloads only carry
the new added per-queue offloads to do some optimization or some
code change on base of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

---
v11:
This patch set is based on 18.05-rc2 .
document update according to feedback
revise rte_ethdev.h for doxygen

v10:
sorry, missed the code change; fix the building error

v9:
replace RTE_PMD_DEBUG_TRACE with ethdev_log(ERR, in ethdev
to avoid failure of application which hasn't been completely
converted to new offload API.

v8:
Revise PMD codes to comply with offload API in v7
update document

v7:
Give the maximum freedom for upper application,
only minimal checking is performed in ethdev layer.
Only requested specific pure per-queue offloadings are input
to underlying PMD.

v6:
No need to enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to the decision on the offloading API in the community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 doc/guides/prog_guide/poll_mode_drv.rst |  28 ++++--
 doc/guides/rel_notes/release_18_05.rst  |   8 ++
 drivers/net/avf/avf_rxtx.c              |   5 +-
 drivers/net/bnxt/bnxt_ethdev.c          |  17 ----
 drivers/net/cxgbe/cxgbe_ethdev.c        |  50 +---------
 drivers/net/dpaa/dpaa_ethdev.c          |  16 ----
 drivers/net/dpaa2/dpaa2_ethdev.c        |  16 ----
 drivers/net/e1000/em_ethdev.c           |  19 ----
 drivers/net/e1000/em_rxtx.c             |  64 ++-----------
 drivers/net/e1000/igb_rxtx.c            |  64 ++-----------
 drivers/net/ena/ena_ethdev.c            |  65 +------------
 drivers/net/failsafe/failsafe_ops.c     |  81 ----------------
 drivers/net/fm10k/fm10k_ethdev.c        |  82 ++--------------
 drivers/net/i40e/i40e_rxtx.c            |  58 ++----------
 drivers/net/ixgbe/ixgbe_ethdev.c        |  38 --------
 drivers/net/ixgbe/ixgbe_rxtx.c          |  66 ++-----------
 drivers/net/mlx4/mlx4_rxq.c             |  43 ++-------
 drivers/net/mlx4/mlx4_txq.c             |  42 ++------
 drivers/net/mlx5/mlx5_ethdev.c          |  22 -----
 drivers/net/mlx5/mlx5_rxq.c             |  50 ++--------
 drivers/net/mlx5/mlx5_txq.c             |  44 +--------
 drivers/net/mvpp2/mrvl_ethdev.c         |  97 +------------------
 drivers/net/nfp/nfp_net.c               | 163 --------------------------------
 drivers/net/octeontx/octeontx_ethdev.c  |  72 +-------------
 drivers/net/sfc/sfc_ethdev.c            |   9 +-
 drivers/net/sfc/sfc_rx.c                |  42 ++------
 drivers/net/sfc/sfc_rx.h                |   3 +-
 drivers/net/sfc/sfc_tx.c                |  42 ++------
 drivers/net/sfc/sfc_tx.h                |   3 +-
 drivers/net/tap/rte_eth_tap.c           |  88 ++---------------
 drivers/net/thunderx/nicvf_ethdev.c     |  70 ++------------
 drivers/net/virtio/virtio_rxtx.c        |   9 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c    |  16 ----
 drivers/net/vmxnet3/vmxnet3_rxtx.c      |   8 +-
 lib/librte_ethdev/rte_ethdev.c          |  86 +++++++++++++++++
 lib/librte_ethdev/rte_ethdev.h          |  20 +++-
 36 files changed, 257 insertions(+), 1349 deletions(-)

diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 09a93ba..bbb85f0 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -297,16 +297,32 @@ Per-Port and Per-Queue Offloads
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
+A per-queue offloading can be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offload is the one supported by device but not per-queue type.
+A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
+A pure per-port offloading must be enabled or disabled on all queues at the same time.
+Any offloading is per-queue or pure per-port type, but can't be both types at same devices.
+A per-port offloading can be enabled or disabled on all queues at the same time.
+It is certain that both per-queue and pure per-port offloading are per-port type.
 The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
+The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
+The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
-Per-port offload configuration is set using ``rte_eth_dev_configure``.
-Per-queue offload configuration is set using ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup``.
-To enable per-port offload, the offload should be set on both device configuration and queue setup.
-In case of a mixed configuration the queue setup shall return with an error.
-To enable per-queue offload, the offload can be set only on the queue setup.
-Offloads which are not enabled are disabled by default.
+Any requested offloading by application must be within the device capabilities.
+Any offloading is disabled by default if it is not set in the parameter
+dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure()`` and
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
+If any offloading is enabled in ``rte_eth_dev_configure()`` by application,
+it is enabled on all queues no matter whether it is per-queue or
+per-port type and no matter whether it is set or cleared in
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
+If a per-queue offloading hasn't been enabled in ``rte_eth_dev_configure()``,
+it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup()`` for individual queue.
+A new added offloads in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()`` input by application
+is the one which hasn't been enabled in ``rte_eth_dev_configure()`` and is requested to be enabled
+in ``rte_eth_[rt]x_queue_setup()``, it must be per-queue type, otherwise trigger an error log.
 
 For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in ``rte_eth_txconf`` struct.
 In such cases it is not required to set other flags in ``txq_flags``.
diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
index 0ae61e8..716e9f4 100644
--- a/doc/guides/rel_notes/release_18_05.rst
+++ b/doc/guides/rel_notes/release_18_05.rst
@@ -303,6 +303,14 @@ API Changes
   * ``rte_flow_create()`` API count action now requires the ``struct rte_flow_action_count``.
   * ``rte_flow_query()`` API parameter changed from action type to action structure.
 
+* ethdev: changes to offload API
+
+   A pure per-port offloading isn't requested to be repeated in [rt]x_conf->offloads to
+   ``rte_eth_[rt]x_queue_setup()``. Now any offloading enabled in ``rte_eth_dev_configure()``
+   can't be disabled by ``rte_eth_[rt]x_queue_setup()``. Any new added offloading which has
+   not been enabled in ``rte_eth_dev_configure()`` and is requested to be enabled in
+   ``rte_eth_[rt]x_queue_setup()`` must be per-queue type, otherwise trigger an error log.
+
 
 ABI Changes
 -----------
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index 1824ed7..e03a136 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t i, base, bsf, tc_mapping;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
 	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
 	    nb_desc > AVF_MAX_RING_DESC ||
 	    nb_desc < AVF_MIN_RING_DESC) {
@@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->free_thresh = tx_free_thresh;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	/* Allocate software ring */
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 348129d..d00b99f 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -500,25 +500,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
-	uint64_t tx_offloads = eth_dev->data->dev_conf.txmode.offloads;
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (tx_offloads != (tx_offloads & BNXT_DEV_TX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Tx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			 tx_offloads, BNXT_DEV_TX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
-	if (rx_offloads != (rx_offloads & BNXT_DEV_RX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Rx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			    rx_offloads, BNXT_DEV_RX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
 
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 3df51b5..fadf684 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -366,31 +366,15 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
-	uint64_t unsupported_offloads, configured_offloads;
+	uint64_t configured_offloads;
 	int err;
 
 	CXGBE_FUNC_TRACE();
 	configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
-	configured_offloads = eth_dev->data->dev_conf.txmode.offloads;
-	unsupported_offloads = configured_offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
+		eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
@@ -440,7 +424,7 @@ int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_txconf *tx_conf)
+			     const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
@@ -448,15 +432,6 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
 	int err = 0;
 	unsigned int temp_nb_desc;
-	uint64_t unsupported_offloads;
-
-	unsupported_offloads = tx_conf->offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
 		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
@@ -553,7 +528,7 @@ int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_rxconf *rx_conf,
+			     const struct rte_eth_rxconf *rx_conf __rte_unused,
 			     struct rte_mempool *mp)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -565,21 +540,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	unsigned int temp_nb_desc;
 	struct rte_eth_dev_info dev_info;
 	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
-	uint64_t unsupported_offloads, configured_offloads;
-
-	configured_offloads = rx_conf->offloads;
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
 		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 6bf8c15..199afdd 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -176,14 +176,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -192,14 +184,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c304b82..de8d83a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -309,14 +309,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA2_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA2_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -325,14 +317,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA2_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA2_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 694a624..4e890ad 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -454,29 +454,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 
-	eth_em_infos_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 2b3c63e..a6b3e92 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1183,22 +1183,6 @@ em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return tx_queue_offload_capa;
 }
 
-static int
-em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1211,21 +1195,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t tsize;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			em_get_tx_port_offloads_capa(dev),
-			em_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -1330,7 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	return 0;
 }
 
@@ -1412,22 +1386,6 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1440,21 +1398,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	struct em_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	uint32_t rsize;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			em_get_rx_port_offloads_capa(dev),
-			em_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -1523,7 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a3776a0..128ed0b 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1475,22 +1475,6 @@ igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1502,19 +1486,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_tx_queue *txq;
 	struct e1000_hw     *hw;
 	uint32_t size;
+	uint64_t offloads;
 
-	if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			igb_get_tx_port_offloads_capa(dev),
-			igb_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1599,7 +1573,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	return 0;
 }
@@ -1690,22 +1664,6 @@ igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1718,19 +1676,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	unsigned int size;
+	uint64_t offloads;
 
-	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			igb_get_rx_port_offloads_capa(dev),
-			igb_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1756,7 +1704,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 41b5638..c595cc7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -238,10 +238,6 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -1005,12 +1001,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
-	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1065,7 +1055,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
@@ -1078,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      const struct rte_eth_rxconf *rx_conf,
+			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1114,11 +1104,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1422,22 +1407,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
-	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    tx_offloads, adapter->tx_supported_offloads);
-		return -ENOTSUP;
-	}
-
-	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    rx_offloads, adapter->rx_supported_offloads);
-		return -ENOTSUP;
-	}
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1459,8 +1428,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		break;
 	}
 
-	adapter->tx_selected_offloads = tx_offloads;
-	adapter->rx_selected_offloads = rx_offloads;
+	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
 	return 0;
 }
 
@@ -1489,32 +1458,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->tx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->rx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 6d44884..368d23f 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -90,22 +90,10 @@ static int
 fs_dev_configure(struct rte_eth_dev *dev)
 {
 	struct sub_device *sdev;
-	uint64_t supp_tx_offloads;
-	uint64_t tx_offloads;
 	uint8_t i;
 	int ret;
 
 	fs_lock(dev, 0);
-	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		ERROR("Some Tx offloads are not supported, "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-		      tx_offloads, supp_tx_offloads);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	FOREACH_SUBDEV(sdev, i, dev) {
 		int rmv_interrupt = 0;
 		int lsc_interrupt = 0;
@@ -297,25 +285,6 @@ fs_dev_close(struct rte_eth_dev *dev)
 	fs_unlock(dev, 0);
 }
 
-static bool
-fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.rxmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_rx_queue_release(void *queue)
 {
@@ -368,19 +337,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
 		fs_rx_queue_release(rxq);
 		dev->data->rx_queues[rx_queue_id] = NULL;
 	}
-	/* Verify application offloads are valid for our port and queue. */
-	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Rx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      rx_conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      PRIV(dev)->infos.rx_offload_capa |
-		      PRIV(dev)->infos.rx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	rxq = rte_zmalloc(NULL,
 			  sizeof(*rxq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -499,25 +455,6 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 	return rc;
 }
 
-static bool
-fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.txmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_tx_queue_release(void *queue)
 {
@@ -557,24 +494,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 		fs_tx_queue_release(txq);
 		dev->data->tx_queues[tx_queue_id] = NULL;
 	}
-	/*
-	 * Don't verify queue offloads for applications which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Tx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      tx_conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      PRIV(dev)->infos.tx_offload_capa |
-		      PRIV(dev)->infos.tx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	txq = rte_zmalloc("ethdev TX queue",
 			  sizeof(*txq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7dfeddf..7a59530 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -448,29 +448,13 @@ static int
 fm10k_dev_configure(struct rte_eth_dev *dev)
 {
 	int ret;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0)
+	if ((dev->data->dev_conf.rxmode.offloads &
+	     DEV_RX_OFFLOAD_CRC_STRIP) == 0)
 		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
 
-	fm10k_dev_infos_get(dev, &dev_info);
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* multipe queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
 	if (ret != 0) {
@@ -1827,22 +1811,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = fm10k_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
@@ -1852,20 +1820,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	struct fm10k_rx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_rx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			fm10k_get_rx_port_offloads_capa(dev),
-			fm10k_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/* make sure the mempool element size can account for alignment. */
 	if (!mempool_element_size_valid(mp)) {
@@ -1911,7 +1870,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->queue_id = queue_id;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	if (handle_rxconf(q, conf))
 		return -EINVAL;
 
@@ -2040,22 +1999,6 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = fm10k_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_txconf *conf)
@@ -2063,20 +2006,11 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_tx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_tx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			fm10k_get_tx_port_offloads_capa(dev),
-			fm10k_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* make sure a valid number of descriptors have been requested */
 	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
@@ -2115,7 +2049,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->port_id = dev->data->port_id;
 	q->queue_id = queue_id;
 	q->txq_flags = conf->txq_flags;
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	q->ops = &def_txq_ops;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 62985c3..05b4950 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1690,20 +1690,6 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 }
 
 static int
-i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
-	if ((requested & dev_info.rx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_first_queue(uint16_t idx, void **queues, int num)
 {
 	uint16_t i;
@@ -1792,18 +1778,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1857,7 +1834,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/* Allocate the maximun number of RX ring hardware descriptor. */
 	len = I40E_MAX_RING_DESC;
@@ -2075,20 +2052,6 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 static int
-i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
-	if ((requested & dev_info.tx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 				struct i40e_tx_queue *txq)
 {
@@ -2151,18 +2114,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t reg_idx, i, base, bsf, tc_mapping;
 	int q_offset;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			dev_info.tx_offload_capa);
-			return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2297,7 +2251,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->queue_id = queue_idx;
 	txq->reg_idx = reg_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 91179e9..320ab21 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2365,9 +2365,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2379,22 +2376,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	ixgbe_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -4965,29 +4946,10 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	ixgbevf_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2892436..7de6f00 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2448,22 +2448,6 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	return tx_offload_capa;
 }
 
-static int
-ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2475,25 +2459,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct ixgbe_tx_queue *txq;
 	struct ixgbe_hw     *hw;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			ixgbe_get_tx_queue_offloads(dev),
-			ixgbe_get_tx_port_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -2621,7 +2592,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -2915,22 +2886,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	return offloads;
 }
 
-static int
-ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2945,21 +2900,12 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			ixgbe_get_rx_port_offloads(dev),
-			ixgbe_get_rx_queue_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -2994,7 +2940,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 65f0994..35c44ff 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -693,26 +693,6 @@ mlx4_get_rx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = mlx4_get_rx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Rx queue.
  *
  * @param dev
@@ -754,20 +734,13 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	};
 	int ret;
 	uint32_t crc_present;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
-	(void)conf; /* Thresholds configuration (ignored). */
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      (mlx4_get_rx_port_offloads(priv) |
-		       mlx4_get_rx_queue_offloads(priv)));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_rx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -793,7 +766,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		     (void *)dev, idx, desc);
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		crc_present = 0;
 	} else if (priv->hw_fcs_strip) {
 		crc_present = 1;
@@ -825,9 +798,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -840,7 +813,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		uint32_t size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index fe6a8e0..2443333 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -180,26 +180,6 @@ mlx4_get_tx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = mlx4_get_tx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Tx queue.
  *
  * @param dev
@@ -246,23 +226,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		},
 	};
 	int ret;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      mlx4_get_tx_port_offloads(priv));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_tx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -313,11 +283,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
 					   DEV_TX_OFFLOAD_UDP_CKSUM |
 					   DEV_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads &
+			      (offloads &
 			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 746b94f..df369cd 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -330,30 +330,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	unsigned int reta_idx_n;
 	const uint8_t use_app_rss_key =
 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-	uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t supp_rx_offloads =
-		(mlx5_get_rx_port_offloads() |
-		 mlx5_get_rx_queue_offloads(dev));
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret = 0;
 
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Tx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, tx_offloads, supp_tx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
-	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Rx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, rx_offloads, supp_rx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (use_app_rss_key &&
 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
 	     rss_hash_default_key_len)) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 126412d..cea93cf 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -237,32 +237,6 @@ mlx5_get_rx_port_offloads(void)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
-	uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return 0;
-	if (((port_offloads ^ offloads) & port_supp_offloads))
-		return 0;
-	return 1;
-}
-
-/**
  *
  * @param dev
  *   Pointer to Ethernet device structure.
@@ -305,18 +279,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = EOVERFLOW;
 		return -rte_errno;
 	}
-	if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
-		DRV_LOG(ERR,
-			"port %u Rx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(mlx5_get_rx_port_offloads() |
-			 mlx5_get_rx_queue_offloads(dev)));
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (!mlx5_rxq_releasable(dev, idx)) {
 		DRV_LOG(ERR, "port %u unable to release queue index %u",
 			dev->data->port_id, idx);
@@ -980,6 +942,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 */
 	const uint16_t desc_n =
 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+	uint64_t offloads = conf->offloads |
+			   dev->data->dev_conf.rxmode.offloads;
 
 	tmpl = rte_calloc_socket("RXQ", 1,
 				 sizeof(*tmpl) +
@@ -997,7 +961,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		tmpl->rxq.sges_n = 0;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1044,12 +1008,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
-	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		tmpl->rxq.crc_present = 0;
 	} else if (config->hw_fcs_strip) {
 		tmpl->rxq.crc_present = 1;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4435874..fb7b4ad 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -127,31 +127,6 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
-
-	/* There are no Tx offloads which are per queue. */
-	if ((offloads & port_supp_offloads) != offloads)
-		return 0;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return 0;
-	return 1;
-}
-
-/**
  * DPDK callback to configure a TX queue.
  *
  * @param dev
@@ -177,22 +152,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		DRV_LOG(ERR,
-			"port %u Tx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			mlx5_get_tx_port_offloads(dev));
-		return -rte_errno;
-	}
 	if (desc <= MLX5_TX_COMP_THRESH) {
 		DRV_LOG(WARNING,
 			"port %u number of descriptors requested for Tx queue"
@@ -810,7 +769,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl->txq.offloads = conf->offloads;
+	tmpl->txq.offloads = conf->offloads |
+			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 05998bf..c9d85ca 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -318,26 +318,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.split_hdr_size) {
 		RTE_LOG(INFO, PMD, "Split headers not supported\n");
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
-		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
-		return -EINVAL;
-	}
-
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		RTE_LOG(INFO, PMD, "LRO not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 ETHER_HDR_LEN - ETHER_CRC_LEN;
@@ -1522,42 +1507,6 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
 }
 
 /**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = MRVL_RX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the receive queue.
  *
  * @param dev
@@ -1587,9 +1536,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	uint32_t min_size,
 		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	int ret, tc, inq;
+	uint64_t offloads;
 
-	if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
 		/*
@@ -1622,8 +1571,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled =
-		dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1686,42 +1634,6 @@ mrvl_rx_queue_release(void *rxq)
 }
 
 /**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = MRVL_TX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the transmit queue.
  *
  * @param dev
@@ -1746,9 +1658,6 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mrvl_priv *priv = dev->data->dev_private;
 	struct mrvl_txq *txq;
 
-	if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
-
 	if (dev->data->tx_queues[idx]) {
 		rte_free(dev->data->tx_queues[idx]);
 		dev->data->tx_queues[idx] = NULL;
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 048324e..d3b8ec0 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -412,148 +412,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX offloads */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
-		PMD_INIT_LOG(INFO, "rxmode does not support split header");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
-		PMD_INIT_LOG(INFO, "RXCSUM not supported");
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-		PMD_INIT_LOG(INFO, "VLAN filter not supported");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
-		PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		PMD_INIT_LOG(INFO, "LRO not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
-		PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
 	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
 		PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
 
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
-		PMD_INIT_LOG(INFO, "Scatter not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
-		PMD_INIT_LOG(INFO, "timestamp offfload not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "security offload not supported");
-		return -EINVAL;
-	}
-
-	/* checking TX offloads */
-	if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
-		PMD_INIT_LOG(INFO, "vlan insert offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
-		PMD_INIT_LOG(INFO, "TX checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) {
-		PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
-		PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
-		PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
-		PMD_INIT_LOG(INFO, "tunneling offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
-		PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
-		PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
-		PMD_INIT_LOG(INFO, "TX multisegs  not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
-		PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "TX security offload not supported");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -1600,8 +1461,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	const struct rte_memzone *tz;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_rxmode *rxmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1615,17 +1474,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	rxmode = &dev_conf->rxmode;
-
-	if (rx_conf->offloads != rxmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u rx offloads not as port offloads",
-				  queue_idx);
-		PMD_DRV_LOG(ERR, "\tport: %" PRIx64 "", rxmode->offloads);
-		PMD_DRV_LOG(ERR, "\tqueue: %" PRIx64 "", rx_conf->offloads);
-		return -EINVAL;
-	}
-
 	/*
 	 * Free memory prior to re-allocation if needed. This is the case after
 	 * calling nfp_net_stop
@@ -1762,8 +1610,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	struct nfp_net_txq *txq;
 	uint16_t tx_free_thresh;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_txmode *txmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1777,15 +1623,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	txmode = &dev_conf->txmode;
-
-	if (tx_conf->offloads != txmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u tx offloads not as port offloads",
-				  queue_idx);
-		return -EINVAL;
-	}
-
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 				    tx_conf->tx_free_thresh :
 				    DEFAULT_TX_FREE_THRESH);
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 04120f5..4b14b8f 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -262,8 +262,6 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -285,38 +283,14 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	configured_offloads = rxmode->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
+		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	configured_offloads = txmode->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
+		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -738,14 +712,12 @@ octeontx_dev_tx_queue_release(void *tx_queue)
 static int
 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			    uint16_t nb_desc, unsigned int socket_id,
-			    const struct rte_eth_txconf *tx_conf)
+			    const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 	struct octeontx_txq *txq = NULL;
 	uint16_t dq_num;
 	int res = 0;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 	RTE_SET_USED(socket_id);
@@ -766,22 +738,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		dev->data->tx_queues[qidx] = NULL;
 	}
 
-	configured_offloads = tx_conf->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
-		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
 	/* Allocating tx queue data structure */
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
 				 RTE_CACHE_LINE_SIZE, nic->node);
@@ -837,8 +793,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint8_t gaura;
 	unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
 	unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 
@@ -861,22 +815,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	port = nic->port_id;
 
-	configured_offloads = rx_conf->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 	/* Rx deferred start is not supported */
 	if (rx_conf->rx_deferred_start) {
 		octeontx_log_err("rx deferred start not supported");
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index e42d553..fc2b254 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -413,14 +413,16 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
 		     rx_queue_id, nb_rx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
-			  rx_conf, mb_pool);
+			  rx_conf, mb_pool, offloads);
 	if (rc != 0)
 		goto fail_rx_qinit;
 
@@ -469,13 +471,16 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
 		     tx_queue_id, nb_tx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
-	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id,
+			  tx_conf, offloads);
 	if (rc != 0)
 		goto fail_tx_qinit;
 
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 57ed34f..dbdd000 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -830,32 +830,10 @@ sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 	}
 }
 
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
-			     sfc_rx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_rx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
 static int
 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
-		   const struct rte_eth_rxconf *rx_conf)
+		   const struct rte_eth_rxconf *rx_conf,
+		   uint64_t offloads)
 {
 	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
 				      sfc_rx_get_queue_offload_caps(sa);
@@ -880,17 +858,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
 		rc = EINVAL;
 	}
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+	if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
 	    DEV_RX_OFFLOAD_CHECKSUM)
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
 
 	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
 
-	if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -998,7 +973,8 @@ int
 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_rx_desc, unsigned int socket_id,
 	     const struct rte_eth_rxconf *rx_conf,
-	     struct rte_mempool *mb_pool)
+	     struct rte_mempool *mb_pool,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	struct sfc_rss *rss = &sa->rss;
@@ -1020,7 +996,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
 	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
 
-	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -1033,7 +1009,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	}
 
 	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
 			"object size is too small", sw_index);
 		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1056,7 +1032,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags =
-		(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 3fba7d8..2898fe5 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -138,7 +138,8 @@ void sfc_rx_stop(struct sfc_adapter *sa);
 int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
 		 uint16_t nb_rx_desc, unsigned int socket_id,
 		 const struct rte_eth_rxconf *rx_conf,
-		 struct rte_mempool *mb_pool);
+		 struct rte_mempool *mb_pool,
+		 uint64_t offloads);
 void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
 void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 1cd08d8..a4a21fa 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -90,31 +90,9 @@ sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 }
 
 static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
-			     sfc_tx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_tx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
-static int
 sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
-		   const struct rte_eth_txconf *tx_conf)
+		   const struct rte_eth_txconf *tx_conf,
+		   uint64_t offloads)
 {
 	int rc = 0;
 
@@ -138,15 +116,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
 
-	if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -160,7 +135,8 @@ sfc_tx_qflush_done(struct sfc_txq *txq)
 int
 sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
-	     const struct rte_eth_txconf *tx_conf)
+	     const struct rte_eth_txconf *tx_conf,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int txq_entries;
@@ -183,7 +159,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(txq_entries >= nb_tx_desc);
 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
 
-	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -210,7 +186,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
 		SFC_TX_DEFAULT_FREE_THRESH;
 	txq->flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
 			   socket_id, &txq->mem);
@@ -221,7 +197,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	info.max_fill_level = txq_max_fill_level;
 	info.free_thresh = txq->free_thresh;
 	info.flags = tx_conf->txq_flags;
-	info.offloads = tx_conf->offloads;
+	info.offloads = offloads;
 	info.txq_entries = txq_info->entries;
 	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
 	info.txq_hw_ring = txq->mem.esm_base;
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index c2e5f13..d2b2c4d 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -121,7 +121,8 @@ void sfc_tx_close(struct sfc_adapter *sa);
 
 int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		 uint16_t nb_tx_desc, unsigned int socket_id,
-		 const struct rte_eth_txconf *tx_conf);
+		 const struct rte_eth_txconf *tx_conf,
+		 uint64_t offloads);
 void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 
 void sfc_tx_qflush_done(struct sfc_txq *txq);
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 172a7ba..78fe89b 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -280,21 +280,6 @@ tap_rx_offload_get_queue_capa(void)
 	       DEV_RX_OFFLOAD_CRC_STRIP;
 }
 
-static bool
-tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 /* Callback to handle the rx burst of packets to the correct interface and
  * file descriptor(s) in a multi-queue setup.
  */
@@ -408,22 +393,6 @@ tap_tx_offload_get_queue_capa(void)
 	       DEV_TX_OFFLOAD_TCP_CKSUM;
 }
 
-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
 	       unsigned int l3_len)
@@ -668,18 +637,6 @@ tap_dev_stop(struct rte_eth_dev *dev)
 static int
 tap_dev_configure(struct rte_eth_dev *dev)
 {
-	uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa();
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, supp_tx_offloads);
-		return -rte_errno;
-	}
 	if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
 		TAP_LOG(ERR,
 			"%s: number of rx queues %d exceeds max num of queues %d",
@@ -1081,19 +1038,6 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}
 
-	/* Verify application offloads are valid for our port and queue. */
-	if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(tap_rx_offload_get_port_capa() |
-			 tap_rx_offload_get_queue_capa()));
-		return -rte_errno;
-	}
 	rxq->mp = mp;
 	rxq->trigger_seen = 1; /* force initial burst */
 	rxq->in_port = dev->data->port_id;
@@ -1157,35 +1101,19 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 	struct pmd_internals *internals = dev->data->dev_private;
 	struct tx_queue *txq;
 	int ret;
+	uint64_t offloads;
 
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -1;
 	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
 	txq = dev->data->tx_queues[tx_queue_id];
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
-		if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
-			txq->csum = !!(tx_conf->offloads &
-					(DEV_TX_OFFLOAD_IPV4_CKSUM |
-					 DEV_TX_OFFLOAD_UDP_CKSUM |
-					 DEV_TX_OFFLOAD_TCP_CKSUM));
-		} else {
-			rte_errno = ENOTSUP;
-			TAP_LOG(ERR,
-				"%p: Tx queue offloads 0x%" PRIx64
-				" don't match port offloads 0x%" PRIx64
-				" or supported offloads 0x%" PRIx64,
-				(void *)dev, tx_conf->offloads,
-				dev->data->dev_conf.txmode.offloads,
-				(tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa()));
-			return -rte_errno;
-		}
-	}
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->csum = !!(offloads &
+			(DEV_TX_OFFLOAD_IPV4_CKSUM |
+			 DEV_TX_OFFLOAD_UDP_CKSUM |
+			 DEV_TX_OFFLOAD_TCP_CKSUM));
+
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
 		return -1;
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index b673b47..23baa99 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -931,7 +931,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	bool is_single_pool;
 	struct nicvf_txq *txq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -945,17 +945,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-	conf_offloads = tx_conf->offloads;
-	offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	unsupported_offloads = conf_offloads & ~offload_capa;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Tx deferred start is not supported */
 	if (tx_conf->tx_deferred_start) {
 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,9 +996,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	txq->tx_free_thresh = tx_free_thresh;
 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
-	txq->offloads = conf_offloads;
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->offloads = offloads;
 
-	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1269,7 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint16_t rx_free_thresh;
 	struct nicvf_rxq *rxq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1283,24 +1273,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-
-	conf_offloads = rx_conf->offloads;
-
-	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	offload_capa = NICVF_RX_OFFLOAD_CAPA;
-	unsupported_offloads = conf_offloads & ~offload_capa;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Mempool memory must be contiguous, so must be one memory segment*/
 	if (mp->nb_mem_chunks != 1) {
 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1381,10 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_rx_queue_reset(rxq);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
-			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+			rte_mempool_avail_count(mp), rxq->phys, offloads);
 
 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1912,8 +1885,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint8_t cqcount;
-	uint64_t conf_rx_offloads, rx_offload_capa;
-	uint64_t conf_tx_offloads, tx_offload_capa;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1922,32 +1893,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
-	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_tx_offloads, tx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	conf_rx_offloads = rxmode->offloads;
-	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
-	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_rx_offloads, rx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
 		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
 		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a8aa87b..92fab21 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -385,10 +385,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
 			uint16_t nb_desc,
 			unsigned int socket_id __rte_unused,
-			const struct rte_eth_rxconf *rx_conf,
+			const struct rte_eth_rxconf *rx_conf __rte_unused,
 			struct rte_mempool *mp)
 {
-	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
@@ -408,10 +407,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			"Cannot allocate mbufs for rx virtqueue");
 	}
 
-	if ((rx_conf->offloads ^ rxmode->offloads) &
-	    VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS)
-		return -EINVAL;
-
 	dev->data->rx_queues[queue_idx] = rxvq;
 
 	return 0;
@@ -504,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
 	/* cannot use simple rxtx funcs with multisegs or offloads */
-	if (tx_conf->offloads)
+	if (dev->data->dev_conf.txmode.offloads)
 		hw->use_simple_tx = 0;
 
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index c850241..ba932ff 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -393,25 +393,9 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	const struct rte_memzone *mz;
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	size_t size;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & VMXNET3_RX_OFFLOAD_CAP) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested RX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			rx_offloads, (uint64_t)VMXNET3_RX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
-	if ((tx_offloads & VMXNET3_TX_OFFLOAD_CAP) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested TX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			tx_offloads, (uint64_t)VMXNET3_TX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
 		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index f6e2d98..cf85f3d 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1013,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			   uint16_t queue_idx,
 			   uint16_t nb_desc,
 			   unsigned int socket_id,
-			   const struct rte_eth_txconf *tx_conf)
+			   const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	const struct rte_memzone *mz;
@@ -1025,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
-	    ETH_TXQ_FLAGS_NOXSUMSCTP) {
-		PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (txq == NULL) {
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index e560524..5baa2aa 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offloading must be within its device capabilities */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
+				"0x%" PRIx64 " doesn't match Rx offloads "
+				"capabilities 0x%" PRIx64 " in %s( )\n",
+				port_id,
+				local_conf.rxmode.offloads,
+				dev_info.rx_offload_capa,
+				__func__);
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
+				"0x%" PRIx64 " doesn't match Tx offloads "
+				"capabilities 0x%" PRIx64 " in %s( )\n",
+				port_id,
+				local_conf.txmode.offloads,
+				dev_info.tx_offload_capa,
+				__func__);
+	}
+
 	/* Check that device supports requested rss hash functions. */
 	if ((dev_info.flow_type_rss_offloads |
 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
@@ -1504,6 +1526,38 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
+				"added offloads 0x%" PRIx64 " must be "
+				"within pre-queue offload capabilities 0x%"
+				PRIx64 " in %s( )\n",
+				port_id,
+				rx_queue_id,
+				local_conf.offloads,
+				dev_info.rx_queue_offload_capa,
+				__func__);
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1612,6 +1666,38 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, new "
+				"added offloads 0x%" PRIx64 " must be "
+				"within pre-queue offload capabilities 0x%"
+				PRIx64 " in %s( )\n",
+				port_id,
+				tx_queue_id,
+				local_conf.offloads,
+				dev_info.tx_queue_offload_capa,
+				__func__);
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index 7ccf4ba..56eca2c 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -1067,9 +1067,9 @@ struct rte_eth_dev_info {
 	uint16_t max_vfs; /**< Maximum number of VFs. */
 	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
 	uint64_t rx_offload_capa;
-	/**< Device per port RX offload capabilities. */
+	/**< All RX offload capabilities including all per queue ones */
 	uint64_t tx_offload_capa;
-	/**< Device per port TX offload capabilities. */
+	/**< All TX offload capabilities.including all per-queue ones */
 	uint64_t rx_queue_offload_capa;
 	/**< Device per queue RX offload capabilities. */
 	uint64_t tx_queue_offload_capa;
@@ -1546,6 +1546,13 @@ const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);
  *        The Rx offload bitfield API is obsolete and will be deprecated.
  *        Applications should set the ignore_bitfield_offloads bit on *rxmode*
  *        structure and use offloads field to set per-port offloads instead.
+ *     -  Any offloading set in eth_conf->[rt]xmode.offloads must be within
+ *        the [rt]x_offload_capa returned from rte_eth_dev_infos_get().
+ *        Any type of device supported offloading set in the input argument
+ *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
+ *        on all [RT]x queues and it can't be disabled no matter whether
+ *        it is cleared or set in the input argument [rt]x_conf->offloads
+*         to rte_eth_[rt]x_queue_setup().
  *     - the Receive Side Scaling (RSS) configuration when using multiple RX
  *         queues per port.
  *
@@ -1602,6 +1609,10 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   ring.
  *   In addition it contains the hardware offloads features to activate using
  *   the DEV_RX_OFFLOAD_* flags.
+ *   If an offloading set in rx_conf->offloads
+ *   hasn't been set in the input argument eth_conf->rxmode.offloads
+ *   to rte_eth_dev_configure(), it is a new added offloading, it must be
+ *   per-queue type and it is enabled for the queue.
  * @param mb_pool
  *   The pointer to the memory pool from which to allocate *rte_mbuf* network
  *   memory buffers to populate each descriptor of the receive ring.
@@ -1660,7 +1671,10 @@ int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
  *     should set it to ETH_TXQ_FLAGS_IGNORE and use
  *     the offloads field below.
  *   - The *offloads* member contains Tx offloads to be enabled.
- *     Offloads which are not set cannot be used on the datapath.
+ *     If an offloading set in tx_conf->offloads
+ *     hasn't been set in the input argument eth_conf->txmode.offloads
+ *     to rte_eth_dev_configure(), it is a new added offloading, it must be
+ *     per-queue type and it is enabled for the queue.
  *
  *     Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
  *     the transmit function to use default values.
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v12] ethdev: new Rx/Tx offloads API
  2018-05-10 11:30                 ` [dpdk-dev] [PATCH v11] " Wei Dai
@ 2018-05-10 11:56                   ` Wei Dai
  2018-05-10 21:39                     ` Thomas Monjalon
                                       ` (2 more replies)
  0 siblings, 3 replies; 60+ messages in thread
From: Wei Dai @ 2018-05-10 11:56 UTC (permalink / raw)
  To: thomas, ferruh.yigit; +Cc: dev, Wei Dai, Qi Zhang

This patch checks whether an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure() and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
If any offloading is enabled in rte_eth_dev_configure() by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
If a per-queue offloading hasn't been enabled in rte_eth_dev_configure(),
it can be enabled or disabled for an individual queue in
rte_eth_[rt]x_queue_setup().
A newly added offloading is one which hasn't been enabled in
rte_eth_dev_configure() and is requested to be enabled in
rte_eth_[rt]x_queue_setup(); it must be of per-queue type,
otherwise an error log is triggered.
The underlying PMD must be aware that the requested offloadings
passed to the PMD-specific queue_setup() function only carry those
newly added offloadings of per-queue type.

This patch can make above such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

This patch assumes that all PMDs in 18.05-rc2 have already been
converted to the offload API defined in 17.11. It also assumes
that all PMDs can return correct offloading capabilities
in rte_eth_dev_infos_get().

In the beginning of [rt]x_queue_setup() of underlying PMD,
add offloads = [rt]xconf->offloads |
dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
defined in 17.11 to avoid upper application broken due to offload
API change.
PMD can use the info that input [rt]xconf->offloads only carry
the new added per-queue offloads to do some optimization or some
code change on base of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

---
v12:
fix coding style warning

v11:
This patch set is based on 18.05-rc2 .
document update according to feedback
revise rte_ethdev.h for doxygen

v10:
sorry, missed the code change; fix the building error

v9:
replace RTE_PMD_DEBUG_TRACE with ethdev_log(ERR, in ethdev
to avoid failure of application which hasn't been completely
converted to new offload API.

v8:
Revise PMD codes to comply with offload API in v7
update document

v7:
Give the maximum freedom for upper application,
only minimal checking is performed in ethdev layer.
Only requested specific pure per-queue offloadings are input
to underlying PMD.

v6:
No need to enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to the decision on the offloading API in the community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 doc/guides/prog_guide/poll_mode_drv.rst |  28 ++++--
 doc/guides/rel_notes/release_18_05.rst  |   8 ++
 drivers/net/avf/avf_rxtx.c              |   5 +-
 drivers/net/bnxt/bnxt_ethdev.c          |  17 ----
 drivers/net/cxgbe/cxgbe_ethdev.c        |  50 +---------
 drivers/net/dpaa/dpaa_ethdev.c          |  16 ----
 drivers/net/dpaa2/dpaa2_ethdev.c        |  16 ----
 drivers/net/e1000/em_ethdev.c           |  19 ----
 drivers/net/e1000/em_rxtx.c             |  64 ++-----------
 drivers/net/e1000/igb_rxtx.c            |  64 ++-----------
 drivers/net/ena/ena_ethdev.c            |  65 +------------
 drivers/net/failsafe/failsafe_ops.c     |  81 ----------------
 drivers/net/fm10k/fm10k_ethdev.c        |  82 ++--------------
 drivers/net/i40e/i40e_rxtx.c            |  58 ++----------
 drivers/net/ixgbe/ixgbe_ethdev.c        |  38 --------
 drivers/net/ixgbe/ixgbe_rxtx.c          |  66 ++-----------
 drivers/net/mlx4/mlx4_rxq.c             |  43 ++-------
 drivers/net/mlx4/mlx4_txq.c             |  42 ++------
 drivers/net/mlx5/mlx5_ethdev.c          |  22 -----
 drivers/net/mlx5/mlx5_rxq.c             |  50 ++--------
 drivers/net/mlx5/mlx5_txq.c             |  44 +--------
 drivers/net/mvpp2/mrvl_ethdev.c         |  97 +------------------
 drivers/net/nfp/nfp_net.c               | 163 --------------------------------
 drivers/net/octeontx/octeontx_ethdev.c  |  72 +-------------
 drivers/net/sfc/sfc_ethdev.c            |   9 +-
 drivers/net/sfc/sfc_rx.c                |  42 ++------
 drivers/net/sfc/sfc_rx.h                |   3 +-
 drivers/net/sfc/sfc_tx.c                |  42 ++------
 drivers/net/sfc/sfc_tx.h                |   3 +-
 drivers/net/tap/rte_eth_tap.c           |  88 ++---------------
 drivers/net/thunderx/nicvf_ethdev.c     |  70 ++------------
 drivers/net/virtio/virtio_rxtx.c        |   9 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c    |  16 ----
 drivers/net/vmxnet3/vmxnet3_rxtx.c      |   8 +-
 lib/librte_ethdev/rte_ethdev.c          |  86 +++++++++++++++++
 lib/librte_ethdev/rte_ethdev.h          |  20 +++-
 36 files changed, 257 insertions(+), 1349 deletions(-)

diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 09a93ba..bbb85f0 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -297,16 +297,32 @@ Per-Port and Per-Queue Offloads
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
+A per-queue offloading can be enabled on one queue and disabled on another queue at the same time.
+A pure per-port offloading is one which is supported by the device but not as a per-queue type.
+A pure per-port offloading can't be enabled on one queue and disabled on another queue at the same time.
+A pure per-port offloading must be enabled or disabled on all queues at the same time.
+Any offloading is either per-queue or pure per-port type, but can't be both types on the same device.
+A per-port offloading can be enabled or disabled on all queues at the same time.
+Both per-queue and pure per-port offloads are therefore per-port type.
 The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
+The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
+The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
-Per-port offload configuration is set using ``rte_eth_dev_configure``.
-Per-queue offload configuration is set using ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup``.
-To enable per-port offload, the offload should be set on both device configuration and queue setup.
-In case of a mixed configuration the queue setup shall return with an error.
-To enable per-queue offload, the offload can be set only on the queue setup.
-Offloads which are not enabled are disabled by default.
+Any offloading requested by the application must be within the device capabilities.
+Any offloading is disabled by default if it is not set in the parameter
+dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure()`` and
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
+If any offloading is enabled in ``rte_eth_dev_configure()`` by the application,
+it is enabled on all queues no matter whether it is per-queue or
+per-port type and no matter whether it is set or cleared in
+[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
+If a per-queue offloading hasn't been enabled in ``rte_eth_dev_configure()``,
+it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup()`` for individual queue.
+A newly added offload in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()`` input by application
+is one which hasn't been enabled in ``rte_eth_dev_configure()`` and is requested to be enabled
+in ``rte_eth_[rt]x_queue_setup()``; it must be per-queue type, otherwise an error log is triggered.
 
 For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in ``rte_eth_txconf`` struct.
 In such cases it is not required to set other flags in ``txq_flags``.
diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
index 0ae61e8..716e9f4 100644
--- a/doc/guides/rel_notes/release_18_05.rst
+++ b/doc/guides/rel_notes/release_18_05.rst
@@ -303,6 +303,14 @@ API Changes
   * ``rte_flow_create()`` API count action now requires the ``struct rte_flow_action_count``.
   * ``rte_flow_query()`` API parameter changed from action type to action structure.
 
+* ethdev: changes to offload API
+
+   A pure per-port offloading isn't required to be repeated in [rt]x_conf->offloads to
+   ``rte_eth_[rt]x_queue_setup()``. Now any offloading enabled in ``rte_eth_dev_configure()``
+   can't be disabled by ``rte_eth_[rt]x_queue_setup()``. Any newly added offloading which has
+   not been enabled in ``rte_eth_dev_configure()`` and is requested to be enabled in
+   ``rte_eth_[rt]x_queue_setup()`` must be per-queue type, otherwise an error log is triggered.
+
 
 ABI Changes
 -----------
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index 1824ed7..e03a136 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t i, base, bsf, tc_mapping;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
 	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
 	    nb_desc > AVF_MAX_RING_DESC ||
 	    nb_desc < AVF_MIN_RING_DESC) {
@@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->free_thresh = tx_free_thresh;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	/* Allocate software ring */
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 348129d..d00b99f 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -500,25 +500,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
-	uint64_t tx_offloads = eth_dev->data->dev_conf.txmode.offloads;
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-	if (tx_offloads != (tx_offloads & BNXT_DEV_TX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Tx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			 tx_offloads, BNXT_DEV_TX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
-	if (rx_offloads != (rx_offloads & BNXT_DEV_RX_OFFLOAD_SUPPORT)) {
-		PMD_DRV_LOG
-			(ERR,
-			 "Rx offloads requested 0x%" PRIx64 " supported 0x%x\n",
-			    rx_offloads, BNXT_DEV_RX_OFFLOAD_SUPPORT);
-		return -ENOTSUP;
-	}
-
 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
 
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 3df51b5..fadf684 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -366,31 +366,15 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
-	uint64_t unsupported_offloads, configured_offloads;
+	uint64_t configured_offloads;
 	int err;
 
 	CXGBE_FUNC_TRACE();
 	configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
-	configured_offloads = eth_dev->data->dev_conf.txmode.offloads;
-	unsupported_offloads = configured_offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
+		eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
@@ -440,7 +424,7 @@ int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_txconf *tx_conf)
+			     const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adapter = pi->adapter;
@@ -448,15 +432,6 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
 	int err = 0;
 	unsigned int temp_nb_desc;
-	uint64_t unsupported_offloads;
-
-	unsupported_offloads = tx_conf->offloads & ~CXGBE_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Tx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
 		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
@@ -553,7 +528,7 @@ int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 			     uint16_t queue_idx, uint16_t nb_desc,
 			     unsigned int socket_id,
-			     const struct rte_eth_rxconf *rx_conf,
+			     const struct rte_eth_rxconf *rx_conf __rte_unused,
 			     struct rte_mempool *mp)
 {
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -565,21 +540,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	unsigned int temp_nb_desc;
 	struct rte_eth_dev_info dev_info;
 	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
-	uint64_t unsupported_offloads, configured_offloads;
-
-	configured_offloads = rx_conf->offloads;
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		dev_info(adapter, "can't disable hw crc strip\n");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~CXGBE_RX_OFFLOADS;
-	if (unsupported_offloads) {
-		dev_err(adapter, "Rx offloads 0x%" PRIx64 " are not supported. "
-			"Supported:0x%" PRIx64 "\n",
-			unsupported_offloads, (uint64_t)CXGBE_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 
 	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
 		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 6bf8c15..199afdd 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -176,14 +176,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -192,14 +184,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c304b82..de8d83a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -309,14 +309,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	/* Rx offloads validation */
-	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
-		DPAA2_PMD_ERR(
-		"Rx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			rx_offloads,
-			dev_rx_offloads_sup | dev_rx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_rx_offloads_nodis & ~rx_offloads) {
 		DPAA2_PMD_WARN(
 		"Rx offloads non configurable - requested 0x%" PRIx64
@@ -325,14 +317,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Tx offloads validation */
-	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
-		DPAA2_PMD_ERR(
-		"Tx offloads non supported - requested 0x%" PRIx64
-		" supported 0x%" PRIx64,
-			tx_offloads,
-			dev_tx_offloads_sup | dev_tx_offloads_nodis);
-		return -ENOTSUP;
-	}
 	if (dev_tx_offloads_nodis & ~tx_offloads) {
 		DPAA2_PMD_WARN(
 		"Tx offloads non configurable - requested 0x%" PRIx64
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 694a624..4e890ad 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -454,29 +454,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 
-	eth_em_infos_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 2b3c63e..a6b3e92 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1183,22 +1183,6 @@ em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return tx_queue_offload_capa;
 }
 
-static int
-em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1211,21 +1195,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t tsize;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			em_get_tx_port_offloads_capa(dev),
-			em_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -1330,7 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	return 0;
 }
 
@@ -1412,22 +1386,6 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1440,21 +1398,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	struct em_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	uint32_t rsize;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			em_get_rx_port_offloads_capa(dev),
-			em_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -1523,7 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a3776a0..128ed0b 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1475,22 +1475,6 @@ igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1502,19 +1486,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_tx_queue *txq;
 	struct e1000_hw     *hw;
 	uint32_t size;
+	uint64_t offloads;
 
-	if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			igb_get_tx_port_offloads_capa(dev),
-			igb_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1599,7 +1573,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	return 0;
 }
@@ -1690,22 +1664,6 @@ igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
 	return rx_queue_offload_capa;
 }
 
-static int
-igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1718,19 +1676,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	unsigned int size;
+	uint64_t offloads;
 
-	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev,
-			rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			igb_get_rx_port_offloads_capa(dev),
-			igb_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1756,7 +1704,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 41b5638..c595cc7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -238,10 +238,6 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
@@ -1005,12 +1001,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
-	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
 
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1065,7 +1055,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
@@ -1078,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      uint16_t queue_idx,
 			      uint16_t nb_desc,
 			      __rte_unused unsigned int socket_id,
-			      const struct rte_eth_rxconf *rx_conf,
+			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
 	struct ena_com_create_io_ctx ctx =
@@ -1114,11 +1104,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
-		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
-		return -EINVAL;
-	}
-
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
 
 	ctx.qid = ena_qid;
@@ -1422,22 +1407,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
-	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    tx_offloads, adapter->tx_supported_offloads);
-		return -ENOTSUP;
-	}
-
-	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
-		    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		    rx_offloads, adapter->rx_supported_offloads);
-		return -ENOTSUP;
-	}
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1459,8 +1428,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 		break;
 	}
 
-	adapter->tx_selected_offloads = tx_offloads;
-	adapter->rx_selected_offloads = rx_offloads;
+	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
 	return 0;
 }
 
@@ -1489,32 +1458,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->tx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
-					      uint64_t offloads)
-{
-	uint64_t port_offloads = adapter->rx_selected_offloads;
-
-	/* Check if port supports all requested offloads.
-	 * True if all offloads selected for queue are set for port.
-	 */
-	if ((offloads & port_offloads) != offloads)
-		return false;
-	return true;
-}
-
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 6d44884..368d23f 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -90,22 +90,10 @@ static int
 fs_dev_configure(struct rte_eth_dev *dev)
 {
 	struct sub_device *sdev;
-	uint64_t supp_tx_offloads;
-	uint64_t tx_offloads;
 	uint8_t i;
 	int ret;
 
 	fs_lock(dev, 0);
-	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		ERROR("Some Tx offloads are not supported, "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-		      tx_offloads, supp_tx_offloads);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	FOREACH_SUBDEV(sdev, i, dev) {
 		int rmv_interrupt = 0;
 		int lsc_interrupt = 0;
@@ -297,25 +285,6 @@ fs_dev_close(struct rte_eth_dev *dev)
 	fs_unlock(dev, 0);
 }
 
-static bool
-fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.rxmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_rx_queue_release(void *queue)
 {
@@ -368,19 +337,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
 		fs_rx_queue_release(rxq);
 		dev->data->rx_queues[rx_queue_id] = NULL;
 	}
-	/* Verify application offloads are valid for our port and queue. */
-	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Rx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      rx_conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      PRIV(dev)->infos.rx_offload_capa |
-		      PRIV(dev)->infos.rx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	rxq = rte_zmalloc(NULL,
 			  sizeof(*rxq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -499,25 +455,6 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 	return rc;
 }
 
-static bool
-fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads;
-	uint64_t queue_supp_offloads;
-	uint64_t port_supp_offloads;
-
-	port_offloads = dev->data->dev_conf.txmode.offloads;
-	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
-	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	     offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 fs_tx_queue_release(void *queue)
 {
@@ -557,24 +494,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 		fs_tx_queue_release(txq);
 		dev->data->tx_queues[tx_queue_id] = NULL;
 	}
-	/*
-	 * Don't verify queue offloads for applications which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
-		rte_errno = ENOTSUP;
-		ERROR("Tx queue offloads 0x%" PRIx64
-		      " don't match port offloads 0x%" PRIx64
-		      " or supported offloads 0x%" PRIx64,
-		      tx_conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      PRIV(dev)->infos.tx_offload_capa |
-		      PRIV(dev)->infos.tx_queue_offload_capa);
-		fs_unlock(dev, 0);
-		return -rte_errno;
-	}
 	txq = rte_zmalloc("ethdev TX queue",
 			  sizeof(*txq) +
 			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7dfeddf..7a59530 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -448,29 +448,13 @@ static int
 fm10k_dev_configure(struct rte_eth_dev *dev)
 {
 	int ret;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0)
+	if ((dev->data->dev_conf.rxmode.offloads &
+	     DEV_RX_OFFLOAD_CRC_STRIP) == 0)
 		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
 
-	fm10k_dev_infos_get(dev, &dev_info);
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* multipe queue mode checking */
 	ret  = fm10k_check_mq_mode(dev);
 	if (ret != 0) {
@@ -1827,22 +1811,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = fm10k_get_rx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_rx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
@@ -1852,20 +1820,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
 	struct fm10k_rx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_rx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			fm10k_get_rx_port_offloads_capa(dev),
-			fm10k_get_rx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/* make sure the mempool element size can account for alignment. */
 	if (!mempool_element_size_valid(mp)) {
@@ -1911,7 +1870,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->queue_id = queue_id;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	if (handle_rxconf(q, conf))
 		return -EINVAL;
 
@@ -2040,22 +1999,6 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 }
 
 static int
-fm10k_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = fm10k_get_tx_queue_offloads_capa(dev);
-	uint64_t port_supported = fm10k_get_tx_port_offloads_capa(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
-static int
 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint16_t nb_desc, unsigned int socket_id,
 	const struct rte_eth_txconf *conf)
@@ -2063,20 +2006,11 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct fm10k_tx_queue *q;
 	const struct rte_memzone *mz;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (!fm10k_check_tx_queue_offloads(dev, conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			fm10k_get_tx_port_offloads_capa(dev),
-			fm10k_get_tx_queue_offloads_capa(dev));
-		return -ENOTSUP;
-	}
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/* make sure a valid number of descriptors have been requested */
 	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
@@ -2115,7 +2049,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	q->port_id = dev->data->port_id;
 	q->queue_id = queue_id;
 	q->txq_flags = conf->txq_flags;
-	q->offloads = conf->offloads;
+	q->offloads = offloads;
 	q->ops = &def_txq_ops;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 62985c3..05b4950 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1690,20 +1690,6 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 }
 
 static int
-i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
-	if ((requested & dev_info.rx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_first_queue(uint16_t idx, void **queues, int num)
 {
 	uint16_t i;
@@ -1792,18 +1778,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1857,7 +1834,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/* Allocate the maximun number of RX ring hardware descriptor. */
 	len = I40E_MAX_RING_DESC;
@@ -2075,20 +2052,6 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 static int
-i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	struct rte_eth_dev_info dev_info;
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported; /* All per port offloads */
-
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
-	if ((requested & dev_info.tx_offload_capa) != requested)
-		return 0; /* requested range check */
-	return !((mandatory ^ requested) & supported);
-}
-
-static int
 i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 				struct i40e_tx_queue *txq)
 {
@@ -2151,18 +2114,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t reg_idx, i, base, bsf, tc_mapping;
 	int q_offset;
-	struct rte_eth_dev_info dev_info;
+	uint64_t offloads;
 
-	if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		dev->dev_ops->dev_infos_get(dev, &dev_info);
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port  offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			dev_info.tx_offload_capa);
-			return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2297,7 +2251,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->queue_id = queue_idx;
 	txq->reg_idx = reg_idx;
 	txq->port_id = dev->data->port_id;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 91179e9..320ab21 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2365,9 +2365,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2379,22 +2376,6 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	ixgbe_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -4965,29 +4946,10 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
-	struct rte_eth_dev_info dev_info;
-	uint64_t rx_offloads;
-	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
-	ixgbevf_dev_info_get(dev, &dev_info);
-	rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    rx_offloads, dev_info.rx_offload_capa);
-		return -ENOTSUP;
-	}
-	tx_offloads = dev->data->dev_conf.txmode.offloads;
-	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
-		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
-			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			    tx_offloads, dev_info.tx_offload_capa);
-		return -ENOTSUP;
-	}
-
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2892436..7de6f00 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2448,22 +2448,6 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	return tx_offload_capa;
 }
 
-static int
-ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2475,25 +2459,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct ixgbe_tx_queue *txq;
 	struct ixgbe_hw     *hw;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64,
-			(void *)dev, tx_conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			ixgbe_get_tx_queue_offloads(dev),
-			ixgbe_get_tx_port_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * Validate number of transmit descriptors.
@@ -2621,7 +2592,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -2915,22 +2886,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	return offloads;
 }
 
-static int
-ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
-	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
-
-	if ((requested & (queue_supported | port_supported)) != requested)
-		return 0;
-
-	if ((port_offloads ^ requested) & port_supported)
-		return 0;
-
-	return 1;
-}
-
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2945,21 +2900,12 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
-		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported port offloads 0x%" PRIx64
-			" or supported queue offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			ixgbe_get_rx_port_offloads(dev),
-			ixgbe_get_rx_queue_offloads(dev));
-		return -ENOTSUP;
-	}
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * Validate number of receive descriptors.
@@ -2994,7 +2940,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-	rxq->offloads = rx_conf->offloads;
+	rxq->offloads = offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 65f0994..35c44ff 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -693,26 +693,6 @@ mlx4_get_rx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = mlx4_get_rx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Rx queue.
  *
  * @param dev
@@ -754,20 +734,13 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	};
 	int ret;
 	uint32_t crc_present;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
-	(void)conf; /* Thresholds configuration (ignored). */
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.rxmode.offloads,
-		      (mlx4_get_rx_port_offloads(priv) |
-		       mlx4_get_rx_queue_offloads(priv)));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_rx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -793,7 +766,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		     (void *)dev, idx, desc);
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		crc_present = 0;
 	} else if (priv->hw_fcs_strip) {
 		crc_present = 1;
@@ -825,9 +798,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -840,7 +813,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		uint32_t size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index fe6a8e0..2443333 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -180,26 +180,6 @@ mlx4_get_tx_port_offloads(struct priv *priv)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- *   Pointer to private structure.
- * @param requested
- *   Per-queue offloads configuration.
- *
- * @return
- *   Nonzero when configuration is valid.
- */
-static int
-mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
-{
-	uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = mlx4_get_tx_port_offloads(priv);
-
-	return !((mandatory ^ requested) & supported);
-}
-
-/**
  * DPDK callback to configure a Tx queue.
  *
  * @param dev
@@ -246,23 +226,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		},
 	};
 	int ret;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
-		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
-		      (void *)dev, conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      mlx4_get_tx_port_offloads(priv));
-		return -rte_errno;
-	}
+
 	if (idx >= dev->data->nb_tx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -313,11 +283,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.csum = priv->hw_csum &&
-			(conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
 					   DEV_TX_OFFLOAD_UDP_CKSUM |
 					   DEV_TX_OFFLOAD_TCP_CKSUM)),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (conf->offloads &
+			      (offloads &
 			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 746b94f..df369cd 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -330,30 +330,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 	unsigned int reta_idx_n;
 	const uint8_t use_app_rss_key =
 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
-	uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev);
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t supp_rx_offloads =
-		(mlx5_get_rx_port_offloads() |
-		 mlx5_get_rx_queue_offloads(dev));
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	int ret = 0;
 
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Tx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, tx_offloads, supp_tx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
-	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
-		DRV_LOG(ERR,
-			"port %u some Rx offloads are not supported requested"
-			" 0x%" PRIx64 " supported 0x%" PRIx64,
-			dev->data->port_id, rx_offloads, supp_rx_offloads);
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (use_app_rss_key &&
 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
 	     rss_hash_default_key_len)) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 126412d..cea93cf 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -237,32 +237,6 @@ mlx5_get_rx_port_offloads(void)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
-	uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return 0;
-	if (((port_offloads ^ offloads) & port_supp_offloads))
-		return 0;
-	return 1;
-}
-
-/**
  *
  * @param dev
  *   Pointer to Ethernet device structure.
@@ -305,18 +279,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = EOVERFLOW;
 		return -rte_errno;
 	}
-	if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
-		DRV_LOG(ERR,
-			"port %u Rx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(mlx5_get_rx_port_offloads() |
-			 mlx5_get_rx_queue_offloads(dev)));
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (!mlx5_rxq_releasable(dev, idx)) {
 		DRV_LOG(ERR, "port %u unable to release queue index %u",
 			dev->data->port_id, idx);
@@ -980,6 +942,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 */
 	const uint16_t desc_n =
 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+	uint64_t offloads = conf->offloads |
+			   dev->data->dev_conf.rxmode.offloads;
 
 	tmpl = rte_calloc_socket("RXQ", 1,
 				 sizeof(*tmpl) +
@@ -997,7 +961,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		tmpl->rxq.sges_n = 0;
-	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size =
 			RTE_PKTMBUF_HEADROOM +
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1044,12 +1008,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Toggle RX checksum offload if hardware supports it. */
-	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
-	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
-	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		tmpl->rxq.crc_present = 0;
 	} else if (config->hw_fcs_strip) {
 		tmpl->rxq.crc_present = 1;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4435874..fb7b4ad 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -127,31 +127,6 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 }
 
 /**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
-
-	/* There are no Tx offloads which are per queue. */
-	if ((offloads & port_supp_offloads) != offloads)
-		return 0;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return 0;
-	return 1;
-}
-
-/**
  * DPDK callback to configure a TX queue.
  *
  * @param dev
@@ -177,22 +152,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		DRV_LOG(ERR,
-			"port %u Tx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			mlx5_get_tx_port_offloads(dev));
-		return -rte_errno;
-	}
 	if (desc <= MLX5_TX_COMP_THRESH) {
 		DRV_LOG(WARNING,
 			"port %u number of descriptors requested for Tx queue"
@@ -810,7 +769,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl->txq.offloads = conf->offloads;
+	tmpl->txq.offloads = conf->offloads |
+			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 05998bf..c9d85ca 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -318,26 +318,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.split_hdr_size) {
 		RTE_LOG(INFO, PMD, "Split headers not supported\n");
 		return -EINVAL;
 	}
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
-		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
-		return -EINVAL;
-	}
-
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		RTE_LOG(INFO, PMD, "LRO not supported\n");
-		return -EINVAL;
-	}
-
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
 				 ETHER_HDR_LEN - ETHER_CRC_LEN;
@@ -1522,42 +1507,6 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
 }
 
 /**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = MRVL_RX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the receive queue.
  *
  * @param dev
@@ -1587,9 +1536,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	uint32_t min_size,
 		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	int ret, tc, inq;
+	uint64_t offloads;
 
-	if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
 		/*
@@ -1622,8 +1571,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
-	rxq->cksum_enabled =
-		dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1686,42 +1634,6 @@ mrvl_rx_queue_release(void *rxq)
 }
 
 /**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- *   dev Pointer to the device.
- * @param
- *   requested Bitmap of the requested offloads.
- *
- * @return
- *   1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
-	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = MRVL_TX_OFFLOADS;
-	uint64_t unsupported = requested & ~supported;
-	uint64_t missing = mandatory & ~requested;
-
-	if (unsupported) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported. "
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			requested, supported);
-		return 0;
-	}
-
-	if (missing) {
-		RTE_LOG(ERR, PMD, "Some Tx offloads are missing. "
-			"Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
-			requested, missing);
-		return 0;
-	}
-
-	return 1;
-}
-
-/**
  * DPDK callback to configure the transmit queue.
  *
  * @param dev
@@ -1746,9 +1658,6 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mrvl_priv *priv = dev->data->dev_private;
 	struct mrvl_txq *txq;
 
-	if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
-		return -ENOTSUP;
-
 	if (dev->data->tx_queues[idx]) {
 		rte_free(dev->data->tx_queues[idx]);
 		dev->data->tx_queues[idx] = NULL;
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 048324e..d3b8ec0 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -412,148 +412,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	/* Checking RX offloads */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
-		PMD_INIT_LOG(INFO, "rxmode does not support split header");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
-		PMD_INIT_LOG(INFO, "RXCSUM not supported");
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-		PMD_INIT_LOG(INFO, "VLAN filter not supported");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
-		PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		PMD_INIT_LOG(INFO, "LRO not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
-		PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
 	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
 		PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
 
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
-		PMD_INIT_LOG(INFO, "Scatter not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
-		PMD_INIT_LOG(INFO, "timestamp offfload not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "security offload not supported");
-		return -EINVAL;
-	}
-
-	/* checking TX offloads */
-	if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
-		PMD_INIT_LOG(INFO, "vlan insert offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
-		PMD_INIT_LOG(INFO, "TX checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) {
-		PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
-		PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
-		PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
-	    txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
-		PMD_INIT_LOG(INFO, "tunneling offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
-		PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
-		PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
-		return -EINVAL;
-	}
-
-	if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
-		PMD_INIT_LOG(INFO, "TX multisegs  not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
-		PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
-		return -EINVAL;
-	}
-
-	if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
-		PMD_INIT_LOG(INFO, "TX security offload not supported");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -1600,8 +1461,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	const struct rte_memzone *tz;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_rxmode *rxmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1615,17 +1474,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	rxmode = &dev_conf->rxmode;
-
-	if (rx_conf->offloads != rxmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u rx offloads not as port offloads",
-				  queue_idx);
-		PMD_DRV_LOG(ERR, "\tport: %" PRIx64 "", rxmode->offloads);
-		PMD_DRV_LOG(ERR, "\tqueue: %" PRIx64 "", rx_conf->offloads);
-		return -EINVAL;
-	}
-
 	/*
 	 * Free memory prior to re-allocation if needed. This is the case after
 	 * calling nfp_net_stop
@@ -1762,8 +1610,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	struct nfp_net_txq *txq;
 	uint16_t tx_free_thresh;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_txmode *txmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1777,15 +1623,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	txmode = &dev_conf->txmode;
-
-	if (tx_conf->offloads != txmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u tx offloads not as port offloads",
-				  queue_idx);
-		return -EINVAL;
-	}
-
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 				    tx_conf->tx_free_thresh :
 				    DEFAULT_TX_FREE_THRESH);
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 04120f5..4b14b8f 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -262,8 +262,6 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -285,38 +283,14 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	configured_offloads = rxmode->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
+		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	configured_offloads = txmode->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
 		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
+		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -738,14 +712,12 @@ octeontx_dev_tx_queue_release(void *tx_queue)
 static int
 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			    uint16_t nb_desc, unsigned int socket_id,
-			    const struct rte_eth_txconf *tx_conf)
+			    const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 	struct octeontx_txq *txq = NULL;
 	uint16_t dq_num;
 	int res = 0;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 	RTE_SET_USED(socket_id);
@@ -766,22 +738,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		dev->data->tx_queues[qidx] = NULL;
 	}
 
-	configured_offloads = tx_conf->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
-		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
 	/* Allocating tx queue data structure */
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
 				 RTE_CACHE_LINE_SIZE, nic->node);
@@ -837,8 +793,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint8_t gaura;
 	unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
 	unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 
@@ -861,22 +815,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	port = nic->port_id;
 
-	configured_offloads = rx_conf->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, configured_offloads,
-		      (uint64_t)OCTEONTX_RX_OFFLOADS);
-		return -ENOTSUP;
-	}
 	/* Rx deferred start is not supported */
 	if (rx_conf->rx_deferred_start) {
 		octeontx_log_err("rx deferred start not supported");
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index e42d553..fc2b254 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -413,14 +413,16 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
 		     rx_queue_id, nb_rx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
-			  rx_conf, mb_pool);
+			  rx_conf, mb_pool, offloads);
 	if (rc != 0)
 		goto fail_rx_qinit;
 
@@ -469,13 +471,16 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 {
 	struct sfc_adapter *sa = dev->data->dev_private;
 	int rc;
+	uint64_t offloads;
 
 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
 		     tx_queue_id, nb_tx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
-	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id,
+			  tx_conf, offloads);
 	if (rc != 0)
 		goto fail_tx_qinit;
 
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 57ed34f..dbdd000 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -830,32 +830,10 @@ sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 	}
 }
 
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
-	uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
-			     sfc_rx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_rx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
 static int
 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
-		   const struct rte_eth_rxconf *rx_conf)
+		   const struct rte_eth_rxconf *rx_conf,
+		   uint64_t offloads)
 {
 	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
 				      sfc_rx_get_queue_offload_caps(sa);
@@ -880,17 +858,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
 		rc = EINVAL;
 	}
 
-	if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+	if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
 	    DEV_RX_OFFLOAD_CHECKSUM)
 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
 
 	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+	    (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
 
-	if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -998,7 +973,8 @@ int
 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_rx_desc, unsigned int socket_id,
 	     const struct rte_eth_rxconf *rx_conf,
-	     struct rte_mempool *mb_pool)
+	     struct rte_mempool *mb_pool,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	struct sfc_rss *rss = &sa->rss;
@@ -1020,7 +996,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
 	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
 
-	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -1033,7 +1009,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	}
 
 	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
-	    (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+	    (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
 			"object size is too small", sw_index);
 		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1056,7 +1032,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
 
 	rxq_info->type_flags =
-		(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 3fba7d8..2898fe5 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -138,7 +138,8 @@ void sfc_rx_stop(struct sfc_adapter *sa);
 int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
 		 uint16_t nb_rx_desc, unsigned int socket_id,
 		 const struct rte_eth_rxconf *rx_conf,
-		 struct rte_mempool *mb_pool);
+		 struct rte_mempool *mb_pool,
+		 uint64_t offloads);
 void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
 void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 1cd08d8..a4a21fa 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -90,31 +90,9 @@ sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
 }
 
 static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
-	uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
-	uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
-			     sfc_tx_get_queue_offload_caps(sa);
-	uint64_t rejected = requested & ~supported;
-	uint64_t missing = (requested & mandatory) ^ mandatory;
-	boolean_t mismatch = B_FALSE;
-
-	if (rejected) {
-		sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
-		mismatch = B_TRUE;
-	}
-
-	if (missing) {
-		sfc_tx_log_offloads(sa, "queue", "must be set", missing);
-		mismatch = B_TRUE;
-	}
-
-	return mismatch;
-}
-
-static int
 sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
-		   const struct rte_eth_txconf *tx_conf)
+		   const struct rte_eth_txconf *tx_conf,
+		   uint64_t offloads)
 {
 	int rc = 0;
 
@@ -138,15 +116,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
 	}
 
 	/* We either perform both TCP and UDP offload, or no offload at all */
-	if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
-	    ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
 		sfc_err(sa, "TCP and UDP offloads can't be set independently");
 		rc = EINVAL;
 	}
 
-	if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
-		rc = EINVAL;
-
 	return rc;
 }
 
@@ -160,7 +135,8 @@ sfc_tx_qflush_done(struct sfc_txq *txq)
 int
 sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
-	     const struct rte_eth_txconf *tx_conf)
+	     const struct rte_eth_txconf *tx_conf,
+	     uint64_t offloads)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int txq_entries;
@@ -183,7 +159,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(txq_entries >= nb_tx_desc);
 	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
 
-	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
@@ -210,7 +186,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
 		SFC_TX_DEFAULT_FREE_THRESH;
 	txq->flags = tx_conf->txq_flags;
-	txq->offloads = tx_conf->offloads;
+	txq->offloads = offloads;
 
 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
 			   socket_id, &txq->mem);
@@ -221,7 +197,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	info.max_fill_level = txq_max_fill_level;
 	info.free_thresh = txq->free_thresh;
 	info.flags = tx_conf->txq_flags;
-	info.offloads = tx_conf->offloads;
+	info.offloads = offloads;
 	info.txq_entries = txq_info->entries;
 	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
 	info.txq_hw_ring = txq->mem.esm_base;
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index c2e5f13..d2b2c4d 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -121,7 +121,8 @@ void sfc_tx_close(struct sfc_adapter *sa);
 
 int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 		 uint16_t nb_tx_desc, unsigned int socket_id,
-		 const struct rte_eth_txconf *tx_conf);
+		 const struct rte_eth_txconf *tx_conf,
+		 uint64_t offloads);
 void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
 
 void sfc_tx_qflush_done(struct sfc_txq *txq);
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 172a7ba..78fe89b 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -280,21 +280,6 @@ tap_rx_offload_get_queue_capa(void)
 	       DEV_RX_OFFLOAD_CRC_STRIP;
 }
 
-static bool
-tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 /* Callback to handle the rx burst of packets to the correct interface and
  * file descriptor(s) in a multi-queue setup.
  */
@@ -408,22 +393,6 @@ tap_tx_offload_get_queue_capa(void)
 	       DEV_TX_OFFLOAD_TCP_CKSUM;
 }
 
-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
-	uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
-	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
-	    offloads)
-		return false;
-	/* Verify we have no conflict with port offloads */
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return false;
-	return true;
-}
-
 static void
 tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
 	       unsigned int l3_len)
@@ -668,18 +637,6 @@ tap_dev_stop(struct rte_eth_dev *dev)
 static int
 tap_dev_configure(struct rte_eth_dev *dev)
 {
-	uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa();
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
-	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"Some Tx offloads are not supported "
-			"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-			tx_offloads, supp_tx_offloads);
-		return -rte_errno;
-	}
 	if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
 		TAP_LOG(ERR,
 			"%s: number of rx queues %d exceeds max num of queues %d",
@@ -1081,19 +1038,6 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}
 
-	/* Verify application offloads are valid for our port and queue. */
-	if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
-		rte_errno = ENOTSUP;
-		TAP_LOG(ERR,
-			"%p: Rx queue offloads 0x%" PRIx64
-			" don't match port offloads 0x%" PRIx64
-			" or supported offloads 0x%" PRIx64,
-			(void *)dev, rx_conf->offloads,
-			dev->data->dev_conf.rxmode.offloads,
-			(tap_rx_offload_get_port_capa() |
-			 tap_rx_offload_get_queue_capa()));
-		return -rte_errno;
-	}
 	rxq->mp = mp;
 	rxq->trigger_seen = 1; /* force initial burst */
 	rxq->in_port = dev->data->port_id;
@@ -1157,35 +1101,19 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
 	struct pmd_internals *internals = dev->data->dev_private;
 	struct tx_queue *txq;
 	int ret;
+	uint64_t offloads;
 
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -1;
 	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
 	txq = dev->data->tx_queues[tx_queue_id];
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (tx_conf != NULL &&
-	    !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
-		if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
-			txq->csum = !!(tx_conf->offloads &
-					(DEV_TX_OFFLOAD_IPV4_CKSUM |
-					 DEV_TX_OFFLOAD_UDP_CKSUM |
-					 DEV_TX_OFFLOAD_TCP_CKSUM));
-		} else {
-			rte_errno = ENOTSUP;
-			TAP_LOG(ERR,
-				"%p: Tx queue offloads 0x%" PRIx64
-				" don't match port offloads 0x%" PRIx64
-				" or supported offloads 0x%" PRIx64,
-				(void *)dev, tx_conf->offloads,
-				dev->data->dev_conf.txmode.offloads,
-				(tap_tx_offload_get_port_capa() |
-				tap_tx_offload_get_queue_capa()));
-			return -rte_errno;
-		}
-	}
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->csum = !!(offloads &
+			(DEV_TX_OFFLOAD_IPV4_CKSUM |
+			 DEV_TX_OFFLOAD_UDP_CKSUM |
+			 DEV_TX_OFFLOAD_TCP_CKSUM));
+
 	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
 	if (ret == -1)
 		return -1;
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index b673b47..23baa99 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -931,7 +931,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	bool is_single_pool;
 	struct nicvf_txq *txq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -945,17 +945,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-	conf_offloads = tx_conf->offloads;
-	offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	unsupported_offloads = conf_offloads & ~offload_capa;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Tx deferred start is not supported */
 	if (tx_conf->tx_deferred_start) {
 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,9 +996,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	txq->tx_free_thresh = tx_free_thresh;
 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
-	txq->offloads = conf_offloads;
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->offloads = offloads;
 
-	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1269,7 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint16_t rx_free_thresh;
 	struct nicvf_rxq *rxq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
-	uint64_t conf_offloads, offload_capa, unsupported_offloads;
+	uint64_t offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1283,24 +1273,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
-
-	conf_offloads = rx_conf->offloads;
-
-	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	offload_capa = NICVF_RX_OFFLOAD_CAPA;
-	unsupported_offloads = conf_offloads & ~offload_capa;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
-		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      unsupported_offloads, conf_offloads, offload_capa);
-		return -ENOTSUP;
-	}
-
 	/* Mempool memory must be contiguous, so must be one memory segment*/
 	if (mp->nb_mem_chunks != 1) {
 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1381,10 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_rx_queue_reset(rxq);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
-			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+			rte_mempool_avail_count(mp), rxq->phys, offloads);
 
 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1912,8 +1885,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint8_t cqcount;
-	uint64_t conf_rx_offloads, rx_offload_capa;
-	uint64_t conf_tx_offloads, tx_offload_capa;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1922,32 +1893,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
-	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
-	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_tx_offloads, tx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
-		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
-		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
-	}
-
-	conf_rx_offloads = rxmode->offloads;
-	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
-	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
-		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
-		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
-		      conf_rx_offloads, rx_offload_capa);
-		return -ENOTSUP;
-	}
-
-	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
 		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
 		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a8aa87b..92fab21 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -385,10 +385,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
 			uint16_t nb_desc,
 			unsigned int socket_id __rte_unused,
-			const struct rte_eth_rxconf *rx_conf,
+			const struct rte_eth_rxconf *rx_conf __rte_unused,
 			struct rte_mempool *mp)
 {
-	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
@@ -408,10 +407,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			"Cannot allocate mbufs for rx virtqueue");
 	}
 
-	if ((rx_conf->offloads ^ rxmode->offloads) &
-	    VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS)
-		return -EINVAL;
-
 	dev->data->rx_queues[queue_idx] = rxvq;
 
 	return 0;
@@ -504,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
 	/* cannot use simple rxtx funcs with multisegs or offloads */
-	if (tx_conf->offloads)
+	if (dev->data->dev_conf.txmode.offloads)
 		hw->use_simple_tx = 0;
 
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index c850241..ba932ff 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -393,25 +393,9 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	const struct rte_memzone *mz;
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	size_t size;
-	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((rx_offloads & VMXNET3_RX_OFFLOAD_CAP) != rx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested RX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			rx_offloads, (uint64_t)VMXNET3_RX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
-	if ((tx_offloads & VMXNET3_TX_OFFLOAD_CAP) != tx_offloads) {
-		RTE_LOG(ERR, PMD, "Requested TX offloads 0x%" PRIx64
-			" do not match supported 0x%" PRIx64,
-			tx_offloads, (uint64_t)VMXNET3_TX_OFFLOAD_CAP);
-		return -ENOTSUP;
-	}
-
 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
 		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index f6e2d98..cf85f3d 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1013,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			   uint16_t queue_idx,
 			   uint16_t nb_desc,
 			   unsigned int socket_id,
-			   const struct rte_eth_txconf *tx_conf)
+			   const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	const struct rte_memzone *mz;
@@ -1025,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
-	    ETH_TXQ_FLAGS_NOXSUMSCTP) {
-		PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (txq == NULL) {
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index e560524..5baa2aa 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
+	/* Any requested offloading must be within its device capabilities */
+	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+	     local_conf.rxmode.offloads) {
+		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
+				"0x%" PRIx64 " doesn't match Rx offloads "
+				"capabilities 0x%" PRIx64 " in %s( )\n",
+				port_id,
+				local_conf.rxmode.offloads,
+				dev_info.rx_offload_capa,
+				__func__);
+	}
+	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+	     local_conf.txmode.offloads) {
+		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
+				"0x%" PRIx64 " doesn't match Tx offloads "
+				"capabilities 0x%" PRIx64 " in %s( )\n",
+				port_id,
+				local_conf.txmode.offloads,
+				dev_info.tx_offload_capa,
+				__func__);
+	}
+
 	/* Check that device supports requested rss hash functions. */
 	if ((dev_info.flow_type_rss_offloads |
 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
@@ -1504,6 +1526,38 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, newly "
+				"added offloads 0x%" PRIx64 " must be "
+				"within per-queue offload capabilities 0x%"
+				PRIx64 " in %s( )\n",
+				port_id,
+				rx_queue_id,
+				local_conf.offloads,
+				dev_info.rx_queue_offload_capa,
+				__func__);
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1612,6 +1666,38 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	/*
+	 * If an offloading has already been enabled in
+	 * rte_eth_dev_configure(), it has been enabled on all queues,
+	 * so there is no need to enable it in this queue again.
+	 * The local_conf.offloads input to underlying PMD only carries
+	 * those offloadings which are only enabled on this queue and
+	 * not enabled on all queues.
+	 * The underlying PMD must be aware of this point.
+	 */
+	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
+	/*
+	 * New added offloadings for this queue are those not enabled in
+	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * A pure per-port offloading can't be enabled on a queue while
+	 * disabled on another queue. A pure per-port offloading can't
+	 * be enabled for any queue as new added one if it hasn't been
+	 * enabled in rte_eth_dev_configure( ).
+	 */
+	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+	     local_conf.offloads) {
+		ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, newly "
+				"added offloads 0x%" PRIx64 " must be "
+				"within per-queue offload capabilities 0x%"
+				PRIx64 " in %s( )\n",
+				port_id,
+				tx_queue_id,
+				local_conf.offloads,
+				dev_info.tx_queue_offload_capa,
+				__func__);
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index 7ccf4ba..e719442 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -1067,9 +1067,9 @@ struct rte_eth_dev_info {
 	uint16_t max_vfs; /**< Maximum number of VFs. */
 	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
 	uint64_t rx_offload_capa;
-	/**< Device per port RX offload capabilities. */
+	/**< All RX offload capabilities including all per queue ones */
 	uint64_t tx_offload_capa;
-	/**< Device per port TX offload capabilities. */
+	/**< All TX offload capabilities.including all per-queue ones */
 	uint64_t rx_queue_offload_capa;
 	/**< Device per queue RX offload capabilities. */
 	uint64_t tx_queue_offload_capa;
@@ -1546,6 +1546,13 @@ const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);
  *        The Rx offload bitfield API is obsolete and will be deprecated.
  *        Applications should set the ignore_bitfield_offloads bit on *rxmode*
  *        structure and use offloads field to set per-port offloads instead.
+ *     -  Any offloading set in eth_conf->[rt]xmode.offloads must be within
+ *        the [rt]x_offload_capa returned from rte_eth_dev_infos_get().
+ *        Any type of device supported offloading set in the input argument
+ *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
+ *        on all [RT]x queues and it can't be disabled no matter whether
+ *        it is cleared or set in the input argument [rt]x_conf->offloads
+ *        to rte_eth_[rt]x_queue_setup().
  *     - the Receive Side Scaling (RSS) configuration when using multiple RX
  *         queues per port.
  *
@@ -1602,6 +1609,10 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   ring.
  *   In addition it contains the hardware offloads features to activate using
  *   the DEV_RX_OFFLOAD_* flags.
+ *   If an offloading set in rx_conf->offloads
+ *   hasn't been set in the input argument eth_conf->rxmode.offloads
+ *   to rte_eth_dev_configure(), it is a new added offloading, it must be
+ *   per-queue type and it is enabled for the queue.
  * @param mb_pool
  *   The pointer to the memory pool from which to allocate *rte_mbuf* network
  *   memory buffers to populate each descriptor of the receive ring.
@@ -1660,7 +1671,10 @@ int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
  *     should set it to ETH_TXQ_FLAGS_IGNORE and use
  *     the offloads field below.
  *   - The *offloads* member contains Tx offloads to be enabled.
- *     Offloads which are not set cannot be used on the datapath.
+ *     If an offloading set in tx_conf->offloads
+ *     hasn't been set in the input argument eth_conf->txmode.offloads
+ *     to rte_eth_dev_configure(), it is a new added offloading, it must be
+ *     per-queue type and it is enabled for the queue.
  *
  *     Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
  *     the transmit function to use default values.
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v10] ethdev: new Rx/Tx offloads API
  2018-05-10  9:25                 ` Andrew Rybchenko
@ 2018-05-10 19:47                   ` Ferruh Yigit
  0 siblings, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-10 19:47 UTC (permalink / raw)
  To: Andrew Rybchenko, Wei Dai, thomas; +Cc: dev, Qi Zhang

On 5/10/2018 10:25 AM, Andrew Rybchenko wrote:
> On 05/10/2018 03:56 AM, Wei Dai wrote:
>> This patch check if a input requested offloading is valid or not.
>> Any reuqested offloading must be supported in the device capabilities.
>> Any offloading is disabled by default if it is not set in the parameter
>> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
>> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
>> If any offloading is enabled in rte_eth_dev_configure( ) by application,
>> it is enabled on all queues no matter whether it is per-queue or
>> per-port type and no matter whether it is set or cleared in
>> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
>> If a per-queue offloading hasn't be enabled in rte_eth_dev_configure( ),
>> it can be enabled or disabled for individual queue in
>> ret_eth_[rt]x_queue_setup( ).
>> A new added offloading is the one which hasn't been enabled in
>> rte_eth_dev_configure( ) and is reuqested to be enabled in
>> rte_eth_[rt]x_queue_setup( ), it must be per-queue type,
>> otherwise triger an error log.
>> The underlying PMD must be aware that the requested offloadings
>> to PMD specific queue_setup( ) function only carries those
>> new added offloadings of per-queue type.
>>
>> This patch can make above such checking in a common way in rte_ethdev
>> layer to avoid same checking in underlying PMD.
>>
>> This patch assumes that all PMDs in 18.05-rc2 have already
>> converted to offload API defined in 17.11 . It also assumes
>> that all PMDs can return correct offloading capabilities
>> in rte_eth_dev_infos_get( ).
>>
>> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
>> add offloads = [rt]xconf->offloads |
>> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
>> defined in 17.11 to avoid upper application broken due to offload
>> API change.
>> PMD can use the info that input [rt]xconf->offloads only carry
>> the new added per-queue offloads to do some optimization or some
>> code change on base of this patch.
>>
>> Signed-off-by: Wei Dai <wei.dai@intel.com>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
>>
>> ---
>> v10:
>> sorry, miss the code change, fix the buidling error
>>
>> v9:
>> replace RTE_PMD_DEBUG_TRACE with ethdev_log(ERR, in ethdev
>> to avoid failure of application which hasn't been completely
>> converted to new offload API.
> 
> [...]
> 
>> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
>> index e560524..5baa2aa 100644
>> --- a/lib/librte_ethdev/rte_ethdev.c
>> +++ b/lib/librte_ethdev/rte_ethdev.c
>> @@ -1139,6 +1139,28 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>>  							ETHER_MAX_LEN;
>>  	}
>>  
>> +	/* Any requested offloading must be within its device capabilities */
>> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
>> +	     local_conf.rxmode.offloads) {
>> +		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
>> +				"0x%" PRIx64 " doesn't match Rx offloads "
>> +				"capabilities 0x%" PRIx64 " in %s( )\n",
>> +				port_id,
>> +				local_conf.rxmode.offloads,
>> +				dev_info.rx_offload_capa,
>> +				__func__);
> 
> Why is return -EINVAL removed here?
> If application is not updated to use offloads, offloads is 0 and everything is OK.
> If application is updated to use offloads, its behaviour must be consistent.
> Same below for Tx device offloads.

To be cautious not to break apps for the cases we have missed. For example,
testpmd was giving an error with virtual PMDs because of CRC_STRIP; it is easy to
fix testpmd, but other applications may have a similar problem.

Overall agree that an error should be returned; for this release it will only print
an error log, and next release we can add the return back. Next release applications
will be switched to the new offloading API, so they will already need to be updated,
and hopefully that change will land before the rc stage.

> 
>> +	}
>> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
>> +	     local_conf.txmode.offloads) {
>> +		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
>> +				"0x%" PRIx64 " doesn't match Tx offloads "
>> +				"capabilities 0x%" PRIx64 " in %s( )\n",
>> +				port_id,
>> +				local_conf.txmode.offloads,
>> +				dev_info.tx_offload_capa,
>> +				__func__);
>> +	}
>> +
>>  	/* Check that device supports requested rss hash functions. */
>>  	if ((dev_info.flow_type_rss_offloads |
>>  	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
>> @@ -1504,6 +1526,38 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>>  						    &local_conf.offloads);
>>  	}
>>  
>> +	/*
>> +	 * If an offloading has already been enabled in
>> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
>> +	 * so there is no need to enable it in this queue again.
>> +	 * The local_conf.offloads input to underlying PMD only carries
>> +	 * those offloadings which are only enabled on this queue and
>> +	 * not enabled on all queues.
>> +	 * The underlying PMD must be aware of this point.
>> +	 */
>> +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
>> +
>> +	/*
>> +	 * New added offloadings for this queue are those not enabled in
>> +	 * rte_eth_dev_configure( ) and they must be per-queue type.
>> +	 * A pure per-port offloading can't be enabled on a queue while
>> +	 * disabled on another queue. A pure per-port offloading can't
>> +	 * be enabled for any queue as new added one if it hasn't been
>> +	 * enabled in rte_eth_dev_configure( ).
>> +	 */
>> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
>> +	     local_conf.offloads) {
>> +		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
>> +				"added offloads 0x%" PRIx64 " must be "
>> +				"within pre-queue offload capabilities 0x%"
>> +				PRIx64 " in %s( )\n",
>> +				port_id,
>> +				rx_queue_id,
>> +				local_conf.offloads,
>> +				dev_info.rx_queue_offload_capa,
>> +				__func__);
> 
> May be it is really a good tradeoff to remove error return here.
> Ideally it would be nice to see explanation here why.
> 
>> +	}
>> +
>>  	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
>>  					      socket_id, &local_conf, mp);
>>  	if (!ret) {
>> @@ -1612,6 +1666,38 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
>>  					  &local_conf.offloads);
>>  	}
>>  
>> +	/*
>> +	 * If an offloading has already been enabled in
>> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
>> +	 * so there is no need to enable it in this queue again.
>> +	 * The local_conf.offloads input to underlying PMD only carries
>> +	 * those offloadings which are only enabled on this queue and
>> +	 * not enabled on all queues.
>> +	 * The underlying PMD must be aware of this point.
>> +	 */
>> +	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
>> +
>> +	/*
>> +	 * New added offloadings for this queue are those not enabled in
>> +	 * rte_eth_dev_configure( ) and they must be per-queue type.
>> +	 * A pure per-port offloading can't be enabled on a queue while
>> +	 * disabled on another queue. A pure per-port offloading can't
>> +	 * be enabled for any queue as new added one if it hasn't been
>> +	 * enabled in rte_eth_dev_configure( ).
>> +	 */
>> +	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
>> +	     local_conf.offloads) {
>> +		ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, new "
>> +				"added offloads 0x%" PRIx64 " must be "
>> +				"within pre-queue offload capabilities 0x%"
>> +				PRIx64 " in %s( )\n",
>> +				port_id,
>> +				tx_queue_id,
>> +				local_conf.offloads,
>> +				dev_info.tx_queue_offload_capa,
>> +				__func__);
>> +	}
>> +
>>  	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
>>  		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
>>  }
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v10] ethdev: new Rx/Tx offloads API
  2018-05-10  0:56               ` [dpdk-dev] [PATCH v10] " Wei Dai
                                   ` (3 preceding siblings ...)
  2018-05-10 11:30                 ` [dpdk-dev] [PATCH v11] " Wei Dai
@ 2018-05-10 21:08                 ` Ferruh Yigit
  4 siblings, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-10 21:08 UTC (permalink / raw)
  To: Wei Dai, thomas; +Cc: dev, Qi Zhang, John McNamara

On 5/10/2018 1:56 AM, Wei Dai wrote:
> This patch check if a input requested offloading is valid or not.
> Any reuqested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure( ) and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If any offloading is enabled in rte_eth_dev_configure( ) by application,
> it is enabled on all queues no matter whether it is per-queue or
> per-port type and no matter whether it is set or cleared in
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup( ).
> If a per-queue offloading hasn't be enabled in rte_eth_dev_configure( ),
> it can be enabled or disabled for individual queue in
> ret_eth_[rt]x_queue_setup( ).
> A new added offloading is the one which hasn't been enabled in
> rte_eth_dev_configure( ) and is reuqested to be enabled in
> rte_eth_[rt]x_queue_setup( ), it must be per-queue type,
> otherwise triger an error log.
> The underlying PMD must be aware that the requested offloadings
> to PMD specific queue_setup( ) function only carries those
> new added offloadings of per-queue type.
> 
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
> 
> This patch assumes that all PMDs in 18.05-rc2 have already
> converted to offload API defined in 17.11 . It also assumes
> that all PMDs can return correct offloading capabilities
> in rte_eth_dev_infos_get( ).
> 
> In the beginning of [rt]x_queue_setup( ) of underlying PMD,
> add offloads = [rt]xconf->offloads |
> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> defined in 17.11 to avoid upper application broken due to offload
> API change.
> PMD can use the info that input [rt]xconf->offloads only carry
> the new added per-queue offloads to do some optimization or some
> code change on base of this patch.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> 
> ---
> v10:
> sorry, miss the code change, fix the buidling error
> 
> v9:
> replace RTE_PMD_DEBUG_TRACE with ethdev_log(ERR, in ethdev
> to avoid failure of application which hasn't been completely
> converted to new offload API.
> 
> v8:
> Revise PMD codes to comply with offload API in v7
> update document
> 
> v7:
> Give the maximum freedom for upper application,
> only minimal checking is performed in ethdev layer.
> Only requested specific pure per-queue offloadings are input
> to underlying PMD.
> 
> v6:
> No need enable an offload in queue_setup( ) if it has already
> been enabled in dev_configure( )
> 
> v5:
> keep offload settings sent to PMD same as those from application
> 
> v4:
> fix a wrong description in git log message.
> 
> v3:
> rework according to dicision of offloading API in community
> 
> v2:
> add offloads checking in rte_eth_dev_configure( ).
> check if a requested offloading is supported.
> ---
>  doc/guides/prog_guide/poll_mode_drv.rst |  26 +++--

Following are the documentation update suggestions by John [1]. Since the new
version has been sent, I will apply the applicable ones to that new commit before merge.

[1]
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst
b/doc/guides/prog_guide/poll_mode_drv.rst
index bbb85f0..bd66e64 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -296,33 +296,37 @@ described in the mbuf API documentation and in the in
:ref:`Mbuf Library
 Per-Port and Per-Queue Offloads
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
-A per-queue offloading can be enabled on a queue and disabled on another queue
at the same time.
-A pure per-port offload is the one supported by device but not per-queue type.
-A pure per-port offloading can't be enabled on a queue and disabled on another
queue at the same time.
-A pure per-port offloading must be enabled or disabled on all queues at the
same time.
-Any offloading is per-queue or pure per-port type, but can't be both types at
same devices.
-A per-port offloading can be enabled or disabled on all queues at the same time.
-It is certain that both per-queue and pure per-port offloading are per-port type.
+In the DPDK offload API, offloads are divided into per-port and per-queue
offloads as follows:
+
+* A per-queue offloading can be enabled on a queue and disabled on another
queue at the same time.
+* A pure per-port offload is the one supported by device but not per-queue type.
+* A pure per-port offloading can't be enabled on a queue and disabled on
another queue at the same time.
+* A pure per-port offloading must be enabled or disabled on all queues at the
same time.
+* Any offloading is per-queue or pure per-port type, but can't be both types at
same devices.
+* A per-port offloading can be enabled or disabled on all queues at the same time.
+* It is certain that both per-queue and pure per-port offloading are per-port type.
+
 The different offloads capabilities can be queried using
``rte_eth_dev_info_get()``.
-The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()``
includes all per-queue offloading capabilities.
-The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()``
includes all per-port and per-queue offloading capabilities.
+The ``dev_info->[rt]x_queue_offload_capa`` returned from
``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
+The ``dev_info->[rt]x_offload_capa returned`` from ``rte_eth_dev_info_get()``
includes all per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.

 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or
``DEV_RX_OFFLOAD_*`` flags.
-Any requested offloading by application must be within the device capabilities.
+Any requested offloading by an application must be within the device capabilities.
 Any offloading is disabled by default if it is not set in the parameter
-dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure()`` and
-[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
-If any offloading is enabled in ``rte_eth_dev_configure()`` by application,
+``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
+``[rt]x_conf->offloads`` to ``rte_eth_[rt]x_queue_setup()``.
+
+If any offloading is enabled in ``rte_eth_dev_configure()`` by an application,
 it is enabled on all queues no matter whether it is per-queue or
 per-port type and no matter whether it is set or cleared in
-[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
+``[rt]x_conf->offloads`` to ``rte_eth_[rt]x_queue_setup()``.
+
 If a per-queue offloading hasn't been enabled in ``rte_eth_dev_configure()``,
 it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup()`` for individual
queue.
-A new added offloads in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``
input by application
+A newly added offload in ``[rt]x_conf->offloads`` to
``rte_eth_[rt]x_queue_setup()`` input by an application
 is the one which hasn't been enabled in ``rte_eth_dev_configure()`` and is
requested to be enabled
-in ``rte_eth_[rt]x_queue_setup()``, it must be per-queue type, otherwise
trigger an error log.
+in ``rte_eth_[rt]x_queue_setup()``. It must be per-queue type, otherwise
trigger an error log.

 For an application to use the Tx offloads API it should set the
``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in
``rte_eth_txconf`` struct.
 In such cases it is not required to set other flags in ``txq_flags``.
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst
b/doc/guides/prog_guide/poll_mode_drv.rst
index bbb85f0..bd66e64 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -296,33 +296,37 @@ described in the mbuf API documentation and in the in
:ref:`Mbuf Library
 Per-Port and Per-Queue Offloads
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
-A per-queue offloading can be enabled on a queue and disabled on another queue
at the same time.
-A pure per-port offload is the one supported by device but not per-queue type.
-A pure per-port offloading can't be enabled on a queue and disabled on another
queue at the same time.
-A pure per-port offloading must be enabled or disabled on all queues at the
same time.
-Any offloading is per-queue or pure per-port type, but can't be both types at
same devices.
-A per-port offloading can be enabled or disabled on all queues at the same time.
-It is certain that both per-queue and pure per-port offloading are per-port type.
+In the DPDK offload API, offloads are divided into per-port and per-queue
offloads as follows:
+
+* A per-queue offloading can be enabled on a queue and disabled on another
queue at the same time.
+* A pure per-port offload is the one supported by device but not per-queue type.
+* A pure per-port offloading can't be enabled on a queue and disabled on
another queue at the same time.
+* A pure per-port offloading must be enabled or disabled on all queues at the
same time.
+* Any offloading is per-queue or pure per-port type, but can't be both types at
same devices.
+* A per-port offloading can be enabled or disabled on all queues at the same time.
+* It is certain that both per-queue and pure per-port offloading are per-port type.
+
 The different offloads capabilities can be queried using
``rte_eth_dev_info_get()``.
-The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()``
includes all per-queue offloading capabilities.
-The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()``
includes all per-port and per-queue offloading capabilities.
+The ``dev_info->[rt]x_queue_offload_capa`` returned from
``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
+The ``dev_info->[rt]x_offload_capa returned`` from ``rte_eth_dev_info_get()``
includes all per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.

 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or
``DEV_RX_OFFLOAD_*`` flags.
-Any requested offloading by application must be within the device capabilities.
+Any requested offloading by an application must be within the device capabilities.
 Any offloading is disabled by default if it is not set in the parameter
-dev_conf->[rt]xmode.offloads to ``rte_eth_dev_configure()`` and
-[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
-If any offloading is enabled in ``rte_eth_dev_configure()`` by application,
+``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
+``[rt]x_conf->offloads`` to ``rte_eth_[rt]x_queue_setup()``.
+
+If any offloading is enabled in ``rte_eth_dev_configure()`` by an application,
 it is enabled on all queues no matter whether it is per-queue or
 per-port type and no matter whether it is set or cleared in
-[rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``.
+``[rt]x_conf->offloads`` to ``rte_eth_[rt]x_queue_setup()``.
+
 If a per-queue offloading hasn't been enabled in ``rte_eth_dev_configure()``,
 it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup()`` for individual
queue.
-A new added offloads in [rt]x_conf->offloads to ``rte_eth_[rt]x_queue_setup()``
input by application
+A newly added offload in ``[rt]x_conf->offloads`` to
``rte_eth_[rt]x_queue_setup()`` input by an application
 is the one which hasn't been enabled in ``rte_eth_dev_configure()`` and is
requested to be enabled
-in ``rte_eth_[rt]x_queue_setup()``, it must be per-queue type, otherwise
trigger an error log.
+in ``rte_eth_[rt]x_queue_setup()``. It must be per-queue type, otherwise
trigger an error log.

 For an application to use the Tx offloads API it should set the
``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in
``rte_eth_txconf`` struct.
 In such cases it is not required to set other flags in ``txq_flags``.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v12] ethdev: new Rx/Tx offloads API
  2018-05-10 11:56                   ` [dpdk-dev] [PATCH v12] " Wei Dai
@ 2018-05-10 21:39                     ` Thomas Monjalon
  2018-05-14  8:37                       ` Thomas Monjalon
  2018-05-10 21:48                     ` Ferruh Yigit
  2018-05-14 12:00                     ` [dpdk-dev] [PATCH v13] " Wei Dai
  2 siblings, 1 reply; 60+ messages in thread
From: Thomas Monjalon @ 2018-05-10 21:39 UTC (permalink / raw)
  To: Wei Dai; +Cc: dev, ferruh.yigit, Qi Zhang

Hi,

A first general comment: a lot of spaces are still inside parens.
You can grep '( )'.

10/05/2018 13:56, Wei Dai:
> --- a/doc/guides/prog_guide/poll_mode_drv.rst
> +++ b/doc/guides/prog_guide/poll_mode_drv.rst
> +A per-queue offloading can be enabled on a queue and disabled on another queue at the same time.
> +A pure per-port offload is the one supported by device but not per-queue type.

Another way to say it: pure per-port offloads are not directly advertised but
are the port offloads capabilities minus the queue capabilities.
port capabilities = pure per-port capabilities + queue capabilities

> +A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
> +A pure per-port offloading must be enabled or disabled on all queues at the same time.
> +Any offloading is per-queue or pure per-port type, but can't be both types at same devices.
> +A per-port offloading can be enabled or disabled on all queues at the same time.

This sentence is useless: it says any offload can be setup for the whole port.

> +It is certain that both per-queue and pure per-port offloading are per-port type.

This sentence is confusing. I cannot understand it.


>  The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
> +The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
> +The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.

If you want to stick with pure per-port wording, you should say
[rt]x_offload_capa is the port capabilities (including pure per-port and per-queue).


> --- a/lib/librte_ethdev/rte_ethdev.c
> +++ b/lib/librte_ethdev/rte_ethdev.c
> +	/* Any requested offloading must be within its device capabilities */
> +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> +	     local_conf.rxmode.offloads) {
> +		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
> +				"0x%" PRIx64 " doesn't match Rx offloads "
> +				"capabilities 0x%" PRIx64 " in %s( )\n",
> +				port_id,
> +				local_conf.rxmode.offloads,
> +				dev_info.rx_offload_capa,
> +				__func__);

We could have a comment saying that an error will be returned in next version.

> +	}
> +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> +	     local_conf.txmode.offloads) {
> +		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
> +				"0x%" PRIx64 " doesn't match Tx offloads "
> +				"capabilities 0x%" PRIx64 " in %s( )\n",
> +				port_id,
> +				local_conf.txmode.offloads,
> +				dev_info.tx_offload_capa,
> +				__func__);

idem

> +	}


> +	/*
> +	 * If an offloading has already been enabled in
> +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> +	 * so there is no need to enable it in this queue again.
> +	 * The local_conf.offloads input to underlying PMD only carries
> +	 * those offloadings which are only enabled on this queue and
> +	 * not enabled on all queues.
> +	 * The underlying PMD must be aware of this point.

I think the last sentence is useless.

> +	 */
> +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
> +
> +	/*
> +	 * New added offloadings for this queue are those not enabled in
> +	 * rte_eth_dev_configure( ) and they must be per-queue type.
> +	 * A pure per-port offloading can't be enabled on a queue while
> +	 * disabled on another queue. A pure per-port offloading can't
> +	 * be enabled for any queue as new added one if it hasn't been
> +	 * enabled in rte_eth_dev_configure( ).
> +	 */
> +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> +	     local_conf.offloads) {
> +		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
> +				"added offloads 0x%" PRIx64 " must be "
> +				"within pre-queue offload capabilities 0x%"
> +				PRIx64 " in %s( )\n",
> +				port_id,
> +				rx_queue_id,
> +				local_conf.offloads,
> +				dev_info.rx_queue_offload_capa,
> +				__func__);

idem, we can have a comment about error in next version

> +	}


> --- a/lib/librte_ethdev/rte_ethdev.h
> +++ b/lib/librte_ethdev/rte_ethdev.h
>  	uint64_t rx_offload_capa;
> -	/**< Device per port RX offload capabilities. */
> +	/**< All RX offload capabilities including all per queue ones */

OK
per queue -> per-queue

>  	uint64_t tx_offload_capa;
> -	/**< Device per port TX offload capabilities. */
> +	/**< All TX offload capabilities.including all per-queue ones */

Typo: there is a dot instead of space.

>  	uint64_t rx_queue_offload_capa;
>  	/**< Device per queue RX offload capabilities. */

Here you should add more comments:
	No need to repeat flags already enabled at port level.
	A flag enabled at port level, cannot be disabled at queue level.


> + *     -  Any offloading set in eth_conf->[rt]xmode.offloads must be within
> + *        the [rt]x_offload_capa returned from rte_eth_dev_infos_get().

OK

> + *        Any type of device supported offloading set in the input argument
> + *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
> + *        on all [RT]x queues and it can't be disabled no matter whether
> + *        it is cleared or set in the input argument [rt]x_conf->offloads
> + *        to rte_eth_[rt]x_queue_setup().

last part can be simpler: cannot be disabled in queue setup.
"[RT]x queues" can be simply "queues".


> + *   If an offloading set in rx_conf->offloads
> + *   hasn't been set in the input argument eth_conf->rxmode.offloads
> + *   to rte_eth_dev_configure(), it is a new added offloading, it must be
> + *   per-queue type and it is enabled for the queue.

OK
Another wording:
The offloads not advertised in queue capabilities, and not already enabled
at port level, are rejected.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v12] ethdev: new Rx/Tx offloads API
  2018-05-10 11:56                   ` [dpdk-dev] [PATCH v12] " Wei Dai
  2018-05-10 21:39                     ` Thomas Monjalon
@ 2018-05-10 21:48                     ` Ferruh Yigit
  2018-05-14 12:00                     ` [dpdk-dev] [PATCH v13] " Wei Dai
  2 siblings, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-10 21:48 UTC (permalink / raw)
  To: Wei Dai, thomas; +Cc: dev, Qi Zhang

On 5/10/2018 12:56 PM, Wei Dai wrote:
> This patch check if a input requested offloading is valid or not.
> Any reuqested offloading must be supported in the device capabilities.
> Any offloading is disabled by default if it is not set in the parameter
> dev_conf->[rt]xmode.offloads to rte_eth_dev_configure() and
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
> If any offloading is enabled in rte_eth_dev_configure() by application,
> it is enabled on all queues no matter whether it is per-queue or
> per-port type and no matter whether it is set or cleared in
> [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
> If a per-queue offloading hasn't be enabled in rte_eth_dev_configure(),
> it can be enabled or disabled for individual queue in
> ret_eth_[rt]x_queue_setup().
> A new added offloading is the one which hasn't been enabled in
> rte_eth_dev_configure() and is reuqested to be enabled in
> rte_eth_[rt]x_queue_setup(), it must be per-queue type,
> otherwise trigger an error log.
> The underlying PMD must be aware that the requested offloadings
> to PMD specific queue_setup() function only carries those
> new added offloadings of per-queue type.
> 
> This patch can make above such checking in a common way in rte_ethdev
> layer to avoid same checking in underlying PMD.
> 
> This patch assumes that all PMDs in 18.05-rc2 have already
> converted to offload API defined in 17.11 . It also assumes
> that all PMDs can return correct offloading capabilities
> in rte_eth_dev_infos_get().
> 
> In the beginning of [rt]x_queue_setup() of underlying PMD,
> add offloads = [rt]xconf->offloads |
> dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
> defined in 17.11 to avoid upper application broken due to offload
> API change.
> PMD can use the info that input [rt]xconf->offloads only carry
> the new added per-queue offloads to do some optimization or some
> code change on base of this patch.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

In next-net replaced this one (v12) with existing v10.

Applied John's comments on doc.
Squashed sfc patches.

For the next version of the patch, can you please either get the patch from next-net
and update it, or make incremental changes on top of next-net which can be
squashed later into this patch.

Thanks,
ferruh

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v12] ethdev: new Rx/Tx offloads API
  2018-05-10 21:39                     ` Thomas Monjalon
@ 2018-05-14  8:37                       ` Thomas Monjalon
  2018-05-14 11:19                         ` Dai, Wei
  0 siblings, 1 reply; 60+ messages in thread
From: Thomas Monjalon @ 2018-05-14  8:37 UTC (permalink / raw)
  To: Wei Dai; +Cc: dev, ferruh.yigit, Qi Zhang

Wei Dai,
Do you agree with my comments?
Could we have a wording patch to squash in RC3?


10/05/2018 23:39, Thomas Monjalon:
> Hi,
> 
> A first general comment: a lot of spaces are still inside parens.
> You can grep '( )'.
> 
> 10/05/2018 13:56, Wei Dai:
> > --- a/doc/guides/prog_guide/poll_mode_drv.rst
> > +++ b/doc/guides/prog_guide/poll_mode_drv.rst
> > +A per-queue offloading can be enabled on a queue and disabled on another queue at the same time.
> > +A pure per-port offload is the one supported by device but not per-queue type.
> 
> Another way to say it: pure per-port offloads are not directly advertised but
> are the port offloads capabilities minus the queue capabilities.
> port capabilities = pure per-port capabilities + queue capabilities
> 
> > +A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
> > +A pure per-port offloading must be enabled or disabled on all queues at the same time.
> > +Any offloading is per-queue or pure per-port type, but can't be both types at same devices.
> > +A per-port offloading can be enabled or disabled on all queues at the same time.
> 
> This sentence is useless: it says any offload can be setup for the whole port.
> 
> > +It is certain that both per-queue and pure per-port offloading are per-port type.
> 
> This sentence is confusing. I cannot understand it.
> 
> 
> >  The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
> > +The dev_info->[rt]x_queue_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
> > +The dev_info->[rt]x_offload_capa returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
> 
> If you want to stick with pure per-port wording, you should say
> [rt]x_offload_capa is the port capabilities (including pure per-port and per-queue).
> 
> 
> > --- a/lib/librte_ethdev/rte_ethdev.c
> > +++ b/lib/librte_ethdev/rte_ethdev.c
> > +	/* Any requested offloading must be within its device capabilities */
> > +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> > +	     local_conf.rxmode.offloads) {
> > +		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
> > +				"0x%" PRIx64 " doesn't match Rx offloads "
> > +				"capabilities 0x%" PRIx64 " in %s( )\n",
> > +				port_id,
> > +				local_conf.rxmode.offloads,
> > +				dev_info.rx_offload_capa,
> > +				__func__);
> 
> We could have a comment saying that an error will be returned in next version.
> 
> > +	}
> > +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> > +	     local_conf.txmode.offloads) {
> > +		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
> > +				"0x%" PRIx64 " doesn't match Tx offloads "
> > +				"capabilities 0x%" PRIx64 " in %s( )\n",
> > +				port_id,
> > +				local_conf.txmode.offloads,
> > +				dev_info.tx_offload_capa,
> > +				__func__);
> 
> idem
> 
> > +	}
> 
> 
> > +	/*
> > +	 * If an offloading has already been enabled in
> > +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> > +	 * so there is no need to enable it in this queue again.
> > +	 * The local_conf.offloads input to underlying PMD only carries
> > +	 * those offloadings which are only enabled on this queue and
> > +	 * not enabled on all queues.
> > +	 * The underlying PMD must be aware of this point.
> 
> I think the last sentence is useless.
> 
> > +	 */
> > +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
> > +
> > +	/*
> > +	 * New added offloadings for this queue are those not enabled in
> > +	 * rte_eth_dev_configure( ) and they must be per-queue type.
> > +	 * A pure per-port offloading can't be enabled on a queue while
> > +	 * disabled on another queue. A pure per-port offloading can't
> > +	 * be enabled for any queue as new added one if it hasn't been
> > +	 * enabled in rte_eth_dev_configure( ).
> > +	 */
> > +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> > +	     local_conf.offloads) {
> > +		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
> > +				"added offloads 0x%" PRIx64 " must be "
> > +				"within pre-queue offload capabilities 0x%"
> > +				PRIx64 " in %s( )\n",
> > +				port_id,
> > +				rx_queue_id,
> > +				local_conf.offloads,
> > +				dev_info.rx_queue_offload_capa,
> > +				__func__);
> 
> idem, we can have a comment about error in next version
> 
> > +	}
> 
> 
> > --- a/lib/librte_ethdev/rte_ethdev.h
> > +++ b/lib/librte_ethdev/rte_ethdev.h
> >  	uint64_t rx_offload_capa;
> > -	/**< Device per port RX offload capabilities. */
> > +	/**< All RX offload capabilities including all per queue ones */
> 
> OK
> per queue -> per-queue
> 
> >  	uint64_t tx_offload_capa;
> > -	/**< Device per port TX offload capabilities. */
> > +	/**< All TX offload capabilities.including all per-queue ones */
> 
> Typo: there is a dot instead of space.
> 
> >  	uint64_t rx_queue_offload_capa;
> >  	/**< Device per queue RX offload capabilities. */
> 
> Here you should add more comments:
> 	No need to repeat flags already enabled at port level.
> 	A flag enabled at port level, cannot be disabled at queue level.
> 
> 
> > + *     -  Any offloading set in eth_conf->[rt]xmode.offloads must be within
> > + *        the [rt]x_offload_capa returned from rte_eth_dev_infos_get().
> 
> OK
> 
> > + *        Any type of device supported offloading set in the input argument
> > + *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
> > + *        on all [RT]x queues and it can't be disabled no matter whether
> > + *        it is cleared or set in the input argument [rt]x_conf->offloads
> > + *        to rte_eth_[rt]x_queue_setup().
> 
> last part can be simpler: cannot be disabled in queue setup.
> "[RT]x queues" can be simply "queues".
> 
> 
> > + *   If an offloading set in rx_conf->offloads
> > + *   hasn't been set in the input argument eth_conf->rxmode.offloads
> > + *   to rte_eth_dev_configure(), it is a new added offloading, it must be
> > + *   per-queue type and it is enabled for the queue.
> 
> OK
> Another wording:
> The offloads not advertised in queue capabilities, and not already enabled
> at port level, are rejected.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v12] ethdev: new Rx/Tx offloads API
  2018-05-14  8:37                       ` Thomas Monjalon
@ 2018-05-14 11:19                         ` Dai, Wei
  0 siblings, 0 replies; 60+ messages in thread
From: Dai, Wei @ 2018-05-14 11:19 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: dev, Yigit, Ferruh, Zhang, Qi Z

Hi, Thomas & Ferruh
Thanks for your feedback.
I agree with your comments and I am working on the latest commit of the repo dpdk-next-net.
I will submit a new patch to adopt your suggestion.
Please wait for a while ...


> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Monday, May 14, 2018 4:37 PM
> To: Dai, Wei <wei.dai@intel.com>
> Cc: dev@dpdk.org; Yigit, Ferruh <ferruh.yigit@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v12] ethdev: new Rx/Tx offloads API
> 
> Wei Dai,
> Do you agree with my comments?
> Could we have a wording patch to squash in RC3?
> 
> 
> 10/05/2018 23:39, Thomas Monjalon:
> > Hi,
> >
> > A first general comment: a lot of spaces are still inside parens.
> > You can grep '( )'.
> >
> > 10/05/2018 13:56, Wei Dai:
> > > --- a/doc/guides/prog_guide/poll_mode_drv.rst
> > > +++ b/doc/guides/prog_guide/poll_mode_drv.rst
> > > +A per-queue offloading can be enabled on a queue and disabled on
> another queue at the same time.
> > > +A pure per-port offload is the one supported by device but not
> per-queue type.
> >
> > Another way to say it: pure per-port offloads are not directly
> > advertised but are the port offloads capabilities minus the queue
> capabilities.
> > port capabilities = pure per-port capabilities + queue capabilities
> >
> > > +A pure per-port offloading can't be enabled on a queue and disabled on
> another queue at the same time.
> > > +A pure per-port offloading must be enabled or disabled on all queues at
> the same time.
> > > +Any offloading is per-queue or pure per-port type, but can't be both
> types at same devices.
> > > +A per-port offloading can be enabled or disabled on all queues at the
> same time.
> >
> > This sentence is useless: it says any offload can be setup for the whole
> port.
> >
> > > +It is certain that both per-queue and pure per-port offloading are
> per-port type.
> >
> > This sentence is confusing. I cannot understand it.
> >
> >
> > >  The different offloads capabilities can be queried using
> ``rte_eth_dev_info_get()``.
> > > +The dev_info->[rt]x_queue_offload_capa returned from
> ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
> > > +The dev_info->[rt]x_offload_capa returned from
> ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading
> capabilities.
> >
> > If you want to stick with pure per-port wording, you should say
> > [rt]x_offload_capa is the port capabilities (including pure per-port and
> per-queue).
> >
> >
> > > --- a/lib/librte_ethdev/rte_ethdev.c
> > > +++ b/lib/librte_ethdev/rte_ethdev.c
> > > +	/* Any requested offloading must be within its device capabilities
> */
> > > +	if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
> > > +	     local_conf.rxmode.offloads) {
> > > +		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
> > > +				"0x%" PRIx64 " doesn't match Rx offloads "
> > > +				"capabilities 0x%" PRIx64 " in %s( )\n",
> > > +				port_id,
> > > +				local_conf.rxmode.offloads,
> > > +				dev_info.rx_offload_capa,
> > > +				__func__);
> >
> > We could have a comment saying that an error will be returned in next
> version.
> >
> > > +	}
> > > +	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
> > > +	     local_conf.txmode.offloads) {
> > > +		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
> > > +				"0x%" PRIx64 " doesn't match Tx offloads "
> > > +				"capabilities 0x%" PRIx64 " in %s( )\n",
> > > +				port_id,
> > > +				local_conf.txmode.offloads,
> > > +				dev_info.tx_offload_capa,
> > > +				__func__);
> >
> > idem
> >
> > > +	}
> >
> >
> > > +	/*
> > > +	 * If an offloading has already been enabled in
> > > +	 * rte_eth_dev_configure(), it has been enabled on all queues,
> > > +	 * so there is no need to enable it in this queue again.
> > > +	 * The local_conf.offloads input to underlying PMD only carries
> > > +	 * those offloadings which are only enabled on this queue and
> > > +	 * not enabled on all queues.
> > > +	 * The underlying PMD must be aware of this point.
> >
> > I think the last sentence is useless.
> >
> > > +	 */
> > > +	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
> > > +
> > > +	/*
> > > +	 * New added offloadings for this queue are those not enabled in
> > > +	 * rte_eth_dev_configure( ) and they must be per-queue type.
> > > +	 * A pure per-port offloading can't be enabled on a queue while
> > > +	 * disabled on another queue. A pure per-port offloading can't
> > > +	 * be enabled for any queue as new added one if it hasn't been
> > > +	 * enabled in rte_eth_dev_configure( ).
> > > +	 */
> > > +	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
> > > +	     local_conf.offloads) {
> > > +		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
> > > +				"added offloads 0x%" PRIx64 " must be "
> > > +				"within pre-queue offload capabilities 0x%"
> > > +				PRIx64 " in %s( )\n",
> > > +				port_id,
> > > +				rx_queue_id,
> > > +				local_conf.offloads,
> > > +				dev_info.rx_queue_offload_capa,
> > > +				__func__);
> >
> > idem, we can have a comment about error in next version
> >
> > > +	}
> >
> >
> > > --- a/lib/librte_ethdev/rte_ethdev.h
> > > +++ b/lib/librte_ethdev/rte_ethdev.h
> > >  	uint64_t rx_offload_capa;
> > > -	/**< Device per port RX offload capabilities. */
> > > +	/**< All RX offload capabilities including all per queue ones */
> >
> > OK
> > per queue -> per-queue
> >
> > >  	uint64_t tx_offload_capa;
> > > -	/**< Device per port TX offload capabilities. */
> > > +	/**< All TX offload capabilities.including all per-queue ones */
> >
> > Typo: there is a dot instead of space.
> >
> > >  	uint64_t rx_queue_offload_capa;
> > >  	/**< Device per queue RX offload capabilities. */
> >
> > Here you should add more comments:
> > 	No need to repeat flags already enabled at port level.
> > 	A flag enabled at port level, cannot be disabled at queue level.
> >
> >
> > > + *     -  Any offloading set in eth_conf->[rt]xmode.offloads must be
> within
> > > + *        the [rt]x_offload_capa returned from
> rte_eth_dev_infos_get().
> >
> > OK
> >
> > > + *        Any type of device supported offloading set in the input
> argument
> > > + *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is
> enabled
> > > + *        on all [RT]x queues and it can't be disabled no matter
> whether
> > > + *        it is cleared or set in the input argument
> [rt]x_conf->offloads
> > > + *        to rte_eth_[rt]x_queue_setup().
> >
> > last part can be simpler: cannot be disabled in queue setup.
> > "[RT]x queues" can be simply "queues".
> >
> >
> > > + *   If an offloading set in rx_conf->offloads
> > > + *   hasn't been set in the input argument eth_conf->rxmode.offloads
> > > + *   to rte_eth_dev_configure(), it is a new added offloading, it must
> be
> > > + *   per-queue type and it is enabled for the queue.
> >
> > OK
> > Another wording:
> > The offloads not advertised in queue capabilities, and not already
> > enabled at port level, are rejected.
> 
> 
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v13] ethdev: new Rx/Tx offloads API
  2018-05-10 11:56                   ` [dpdk-dev] [PATCH v12] " Wei Dai
  2018-05-10 21:39                     ` Thomas Monjalon
  2018-05-10 21:48                     ` Ferruh Yigit
@ 2018-05-14 12:00                     ` Wei Dai
  2018-05-14 12:54                       ` Thomas Monjalon
  2018-05-14 13:20                       ` [dpdk-dev] [PATCH v14] " Wei Dai
  2 siblings, 2 replies; 60+ messages in thread
From: Wei Dai @ 2018-05-14 12:00 UTC (permalink / raw)
  To: thomas, ferruh.yigit; +Cc: dev, Wei Dai, Qi Zhang

This patch checks if an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure() and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
If any offloading is enabled in rte_eth_dev_configure() by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
If a per-queue offloading hasn't been enabled in rte_eth_dev_configure(),
it can be enabled or disabled for individual queue in
rte_eth_[rt]x_queue_setup().
A newly added offloading is one which hasn't been enabled in
rte_eth_dev_configure() and is requested to be enabled in
rte_eth_[rt]x_queue_setup(); it must be per-queue type,
otherwise trigger an error log.
The underlying PMD must be aware that the requested offloadings
passed to the PMD-specific queue_setup() function only carry those
newly added offloadings of per-queue type.

This patch can make above such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

This patch assumes that all PMDs in 18.05-rc2 have already
converted to offload API defined in 17.11 . It also assumes
that all PMDs can return correct offloading capabilities
in rte_eth_dev_infos_get().

In the beginning of [rt]x_queue_setup() of underlying PMD,
add offloads = [rt]xconf->offloads |
dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
defined in 17.11 to avoid upper application broken due to offload
API change.
PMD can use the info that input [rt]xconf->offloads only carry
the new added per-queue offloads to do some optimization or some
code change on base of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

---
v13:
only rework on v12 according to feedback.
This version is based on commit dc33238da4be
("app/testpmd: check if CRC strip offload supported") in
the repo dpdk-next-net.

v12:
fix coding style warning

v11:
This patch set is based on 18.05-rc2 .
document update according to feedback
revise rte_ethdev.h for doxygen

v10:
sorry, miss the code change, fix the buidling error

v9:
replace RTE_PMD_DEBUG_TRACE with ethdev_log(ERR, in ethdev
to avoid failure of application which hasn't been completely
converted to new offload API.

v8:
Revise PMD codes to comply with offload API in v7
update document

v7:
Give the maximum freedom for upper application,
only minimal checking is performed in ethdev layer.
Only requested specific pure per-queue offloadings are input
to underlying PMD.

v6:
No need enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to dicision of offloading API in community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 doc/guides/prog_guide/poll_mode_drv.rst |  6 +++---
 lib/librte_ethdev/rte_ethdev.c          | 22 ++++++++++++----------
 lib/librte_ethdev/rte_ethdev.h          | 17 ++++++++++-------
 3 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index f4e0bcd..daa858d 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -303,12 +303,12 @@ In the DPDK offload API, offloads are divided into per-port and per-queue offloa
 * A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
 * A pure per-port offloading must be enabled or disabled on all queues at the same time.
 * Any offloading is per-queue or pure per-port type, but can't be both types at same devices.
-* A per-port offloading can be enabled or disabled on all queues at the same time.
-* It is certain that both per-queue and pure per-port offloading are per-port type.
+* Port capabilities = pre-queue capabilities + pure per-port capabilities.
+* Any supported offloading can be enabled on all queues.
 
 The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
 The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
-The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
+The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 3528ba1..b3ed821 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1166,21 +1166,23 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	     local_conf.rxmode.offloads) {
 		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
 				"0x%" PRIx64 " doesn't match Rx offloads "
-				"capabilities 0x%" PRIx64 " in %s( )\n",
+				"capabilities 0x%" PRIx64 " in %s()\n",
 				port_id,
 				local_conf.rxmode.offloads,
 				dev_info.rx_offload_capa,
 				__func__);
+		/* Will return -EINVAL in the next release */
 	}
 	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
 	     local_conf.txmode.offloads) {
 		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
 				"0x%" PRIx64 " doesn't match Tx offloads "
-				"capabilities 0x%" PRIx64 " in %s( )\n",
+				"capabilities 0x%" PRIx64 " in %s()\n",
 				port_id,
 				local_conf.txmode.offloads,
 				dev_info.tx_offload_capa,
 				__func__);
+		/* Will return -EINVAL in the next release */
 	}
 
 	/* Check that device supports requested rss hash functions. */
@@ -1556,29 +1558,29 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 * The local_conf.offloads input to underlying PMD only carries
 	 * those offloadings which are only enabled on this queue and
 	 * not enabled on all queues.
-	 * The underlying PMD must be aware of this point.
 	 */
 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * New added offloadings for this queue are those not enabled in
-	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * rte_eth_dev_configure() and they must be per-queue type.
 	 * A pure per-port offloading can't be enabled on a queue while
 	 * disabled on another queue. A pure per-port offloading can't
 	 * be enabled for any queue as new added one if it hasn't been
-	 * enabled in rte_eth_dev_configure( ).
+	 * enabled in rte_eth_dev_configure().
 	 */
 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
 	     local_conf.offloads) {
 		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
 				"added offloads 0x%" PRIx64 " must be "
 				"within pre-queue offload capabilities 0x%"
-				PRIx64 " in %s( )\n",
+				PRIx64 " in %s()\n",
 				port_id,
 				rx_queue_id,
 				local_conf.offloads,
 				dev_info.rx_queue_offload_capa,
 				__func__);
+		/* Will return -EINVAL in the next release */
 	}
 
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
@@ -1721,29 +1723,29 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	 * The local_conf.offloads input to underlying PMD only carries
 	 * those offloadings which are only enabled on this queue and
 	 * not enabled on all queues.
-	 * The underlying PMD must be aware of this point.
 	 */
 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * New added offloadings for this queue are those not enabled in
-	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * rte_eth_dev_configure() and they must be per-queue type.
 	 * A pure per-port offloading can't be enabled on a queue while
 	 * disabled on another queue. A pure per-port offloading can't
 	 * be enabled for any queue as new added one if it hasn't been
-	 * enabled in rte_eth_dev_configure( ).
+	 * enabled in rte_eth_dev_configure().
 	 */
 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
 	     local_conf.offloads) {
 		ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, new "
 				"added offloads 0x%" PRIx64 " must be "
 				"within pre-queue offload capabilities 0x%"
-				PRIx64 " in %s( )\n",
+				PRIx64 " in %s()\n",
 				port_id,
 				tx_queue_id,
 				local_conf.offloads,
 				dev_info.tx_queue_offload_capa,
 				__func__);
+		/* Will return -EINVAL in the next release */
 	}
 
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index 78e12bf..3a7428e 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -1067,13 +1067,18 @@ struct rte_eth_dev_info {
 	uint16_t max_vfs; /**< Maximum number of VFs. */
 	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
 	uint64_t rx_offload_capa;
-	/**< All RX offload capabilities including all per queue ones */
+	/**<
+	 * All RX offload capabilities including all per-queue ones.
+	 * Any flag in [rt]x_offload_capa and [rt]x_queue_offload_capa
+	 * of this structure needn't be repeated in rte_eth_[rt]x_queue_setup().
+	 * A flag enabled at port level can't be disabled at queue level.
+	 */
 	uint64_t tx_offload_capa;
-	/**< All TX offload capabilities.including all per-queue ones */
+	/**< All TX offload capabilities including all per-queue ones */
 	uint64_t rx_queue_offload_capa;
-	/**< Device per queue RX offload capabilities. */
+	/**< Device per-queue RX offload capabilities. */
 	uint64_t tx_queue_offload_capa;
-	/**< Device per queue TX offload capabilities. */
+	/**< Device per-queue TX offload capabilities. */
 	uint16_t reta_size;
 	/**< Device redirection table size, the total number of entries. */
 	uint8_t hash_key_size; /**< Hash key size in bytes */
@@ -1554,9 +1559,7 @@ const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);
  *        the [rt]x_offload_capa returned from rte_eth_dev_infos_get().
  *        Any type of device supported offloading set in the input argument
  *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
- *        on all [RT]x queues and it can't be disabled no matter whether
- *        it is cleared or set in the input argument [rt]x_conf->offloads
- *        to rte_eth_[rt]x_queue_setup().
+ *        on all queues and it can't be disabled in rte_eth_[rt]x_queue_setup().
  *     - the Receive Side Scaling (RSS) configuration when using multiple RX
  *         queues per port.
  *
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v13] ethdev: new Rx/Tx offloads API
  2018-05-14 12:00                     ` [dpdk-dev] [PATCH v13] " Wei Dai
@ 2018-05-14 12:54                       ` Thomas Monjalon
  2018-05-14 13:26                         ` Dai, Wei
  2018-05-14 13:20                       ` [dpdk-dev] [PATCH v14] " Wei Dai
  1 sibling, 1 reply; 60+ messages in thread
From: Thomas Monjalon @ 2018-05-14 12:54 UTC (permalink / raw)
  To: Wei Dai; +Cc: dev, ferruh.yigit, Qi Zhang

14/05/2018 14:00, Wei Dai:
> --- a/doc/guides/prog_guide/poll_mode_drv.rst
> +++ b/doc/guides/prog_guide/poll_mode_drv.rst
> @@ -303,12 +303,12 @@ In the DPDK offload API, offloads are divided into per-port and per-queue offloa
>  * A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
>  * A pure per-port offloading must be enabled or disabled on all queues at the same time.
>  * Any offloading is per-queue or pure per-port type, but can't be both types at same devices.
> -* A per-port offloading can be enabled or disabled on all queues at the same time.
> -* It is certain that both per-queue and pure per-port offloading are per-port type.
> +* Port capabilities = pre-queue capabilities + pure per-port capabilities.

s/pre/per/

> +* Any supported offloading can be enabled on all queues.
>  
>  The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
>  The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
> -The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
> +The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.

OK


> @@ -1556,29 +1558,29 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>  	 * The local_conf.offloads input to underlying PMD only carries
>  	 * those offloadings which are only enabled on this queue and
>  	 * not enabled on all queues.
> -	 * The underlying PMD must be aware of this point.
>  	 */

OK


> --- a/lib/librte_ethdev/rte_ethdev.h
> +++ b/lib/librte_ethdev/rte_ethdev.h
> @@ -1067,13 +1067,18 @@ struct rte_eth_dev_info {
>  	uint16_t max_vfs; /**< Maximum number of VFs. */
>  	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
>  	uint64_t rx_offload_capa;
> -	/**< All RX offload capabilities including all per queue ones */
> +	/**<
> +	 * All RX offload capabilities including all per-queue ones.
> +	 * Any flag in [rt]x_offload_capa and [rt]x_queue_offload_capa
> +	 * of this structure needn't be repeated in rte_eth_[rt]x_queue_setup().

It is confusing. Better to remove this sentence about queue_setup
in port capa comment.

> +	 * A flag enabled at port level can't be disabled at queue level.

This one too: it is a comment about port capa, not queue setup.


> @@ -1554,9 +1559,7 @@ const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);
>   *        the [rt]x_offload_capa returned from rte_eth_dev_infos_get().
>   *        Any type of device supported offloading set in the input argument
>   *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
> - *        on all [RT]x queues and it can't be disabled no matter whether
> - *        it is cleared or set in the input argument [rt]x_conf->offloads
> - *        to rte_eth_[rt]x_queue_setup().
> + *        on all queues and it can't be disabled in rte_eth_[rt]x_queue_setup().

OK


Missing: we must explain the "no repeat need" and
"no disable port offload on queue" constraint.
In the last review, I was suggesting such sentences:
        No need to repeat flags already enabled at port level.
        A flag enabled at port level, cannot be disabled at queue level.
I think it should go in queue setup comments.

Opinion?

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [dpdk-dev] [PATCH v14] ethdev: new Rx/Tx offloads API
  2018-05-14 12:00                     ` [dpdk-dev] [PATCH v13] " Wei Dai
  2018-05-14 12:54                       ` Thomas Monjalon
@ 2018-05-14 13:20                       ` Wei Dai
  2018-05-14 14:11                         ` Thomas Monjalon
  1 sibling, 1 reply; 60+ messages in thread
From: Wei Dai @ 2018-05-14 13:20 UTC (permalink / raw)
  To: thomas, ferruh.yigit; +Cc: dev, Wei Dai, Qi Zhang

This patch checks if an input requested offloading is valid or not.
Any requested offloading must be supported in the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
dev_conf->[rt]xmode.offloads to rte_eth_dev_configure() and
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
If any offloading is enabled in rte_eth_dev_configure() by application,
it is enabled on all queues no matter whether it is per-queue or
per-port type and no matter whether it is set or cleared in
[rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().
If a per-queue offloading hasn't been enabled in rte_eth_dev_configure(),
it can be enabled or disabled for individual queue in
rte_eth_[rt]x_queue_setup().
A new added offloading is the one which hasn't been enabled in
rte_eth_dev_configure() and is requested to be enabled in
rte_eth_[rt]x_queue_setup(), it must be per-queue type,
otherwise trigger an error log.
The underlying PMD must be aware that the requested offloadings
to PMD specific queue_setup() function only carries those
new added offloadings of per-queue type.

This patch can make above such checking in a common way in rte_ethdev
layer to avoid same checking in underlying PMD.

This patch assumes that all PMDs in 18.05-rc2 have already
converted to offload API defined in 17.11 . It also assumes
that all PMDs can return correct offloading capabilities
in rte_eth_dev_infos_get().

In the beginning of [rt]x_queue_setup() of underlying PMD,
add offloads = [rt]xconf->offloads |
dev->data->dev_conf.[rt]xmode.offloads; to keep same as offload API
defined in 17.11 to avoid upper application broken due to offload
API change.
PMD can use the info that input [rt]xconf->offloads only carry
the new added per-queue offloads to do some optimization or some
code change on base of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

---
v14:
rework on v13 according to feedback.
This v14 is also based on commit dc33238da4be
("app/testpmd: check if CRC strip offload supported") in
the repo dpdk-next-net.

v13:
only rework on v12 according to feedback.
This version is based on commit dc33238da4be
("app/testpmd: check if CRC strip offload supported") in
the repo dpdk-next-net.

v12:
fix coding style warning

v11:
This patch set is based on 18.05-rc2 .
document update according to feedback
revise rte_ethdev.h for doxygen

v10:
sorry, missed the code change, fix the building error

v9:
replace RTE_PMD_DEBUG_TRACE with ethdev_log(ERR, in ethdev
to avoid failure of application which hasn't been completely
converted to new offload API.

v8:
Revise PMD codes to comply with offload API in v7
update document

v7:
Give the maximum freedom for upper application,
only minimal checking is performed in ethdev layer.
Only requested specific pure per-queue offloadings are input
to underlying PMD.

v6:
No need enable an offload in queue_setup( ) if it has already
been enabled in dev_configure( )

v5:
keep offload settings sent to PMD same as those from application

v4:
fix a wrong description in git log message.

v3:
rework according to decision of offloading API in community

v2:
add offloads checking in rte_eth_dev_configure( ).
check if a requested offloading is supported.
---
 doc/guides/prog_guide/poll_mode_drv.rst |  6 +++---
 lib/librte_ethdev/rte_ethdev.c          | 22 ++++++++++++----------
 lib/librte_ethdev/rte_ethdev.h          | 18 +++++++++++-------
 3 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index f4e0bcd..af82352 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -303,12 +303,12 @@ In the DPDK offload API, offloads are divided into per-port and per-queue offloa
 * A pure per-port offloading can't be enabled on a queue and disabled on another queue at the same time.
 * A pure per-port offloading must be enabled or disabled on all queues at the same time.
 * Any offloading is per-queue or pure per-port type, but can't be both types at same devices.
-* A per-port offloading can be enabled or disabled on all queues at the same time.
-* It is certain that both per-queue and pure per-port offloading are per-port type.
+* Port capabilities = per-queue capabilities + pure per-port capabilities.
+* Any supported offloading can be enabled on all queues.
 
 The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
 The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
-The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading capabilities.
+The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
 Supported offloads can be either per-port or per-queue.
 
 Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 3528ba1..b3ed821 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1166,21 +1166,23 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	     local_conf.rxmode.offloads) {
 		ethdev_log(ERR, "ethdev port_id=%d requested Rx offloads "
 				"0x%" PRIx64 " doesn't match Rx offloads "
-				"capabilities 0x%" PRIx64 " in %s( )\n",
+				"capabilities 0x%" PRIx64 " in %s()\n",
 				port_id,
 				local_conf.rxmode.offloads,
 				dev_info.rx_offload_capa,
 				__func__);
+		/* Will return -EINVAL in the next release */
 	}
 	if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
 	     local_conf.txmode.offloads) {
 		ethdev_log(ERR, "ethdev port_id=%d requested Tx offloads "
 				"0x%" PRIx64 " doesn't match Tx offloads "
-				"capabilities 0x%" PRIx64 " in %s( )\n",
+				"capabilities 0x%" PRIx64 " in %s()\n",
 				port_id,
 				local_conf.txmode.offloads,
 				dev_info.tx_offload_capa,
 				__func__);
+		/* Will return -EINVAL in the next release */
 	}
 
 	/* Check that device supports requested rss hash functions. */
@@ -1556,29 +1558,29 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 * The local_conf.offloads input to underlying PMD only carries
 	 * those offloadings which are only enabled on this queue and
 	 * not enabled on all queues.
-	 * The underlying PMD must be aware of this point.
 	 */
 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
 
 	/*
 	 * New added offloadings for this queue are those not enabled in
-	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * rte_eth_dev_configure() and they must be per-queue type.
 	 * A pure per-port offloading can't be enabled on a queue while
 	 * disabled on another queue. A pure per-port offloading can't
 	 * be enabled for any queue as new added one if it hasn't been
-	 * enabled in rte_eth_dev_configure( ).
+	 * enabled in rte_eth_dev_configure().
 	 */
 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
 	     local_conf.offloads) {
 		ethdev_log(ERR, "Ethdev port_id=%d rx_queue_id=%d, new "
 				"added offloads 0x%" PRIx64 " must be "
 				"within pre-queue offload capabilities 0x%"
-				PRIx64 " in %s( )\n",
+				PRIx64 " in %s()\n",
 				port_id,
 				rx_queue_id,
 				local_conf.offloads,
 				dev_info.rx_queue_offload_capa,
 				__func__);
+		/* Will return -EINVAL in the next release */
 	}
 
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
@@ -1721,29 +1723,29 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	 * The local_conf.offloads input to underlying PMD only carries
 	 * those offloadings which are only enabled on this queue and
 	 * not enabled on all queues.
-	 * The underlying PMD must be aware of this point.
 	 */
 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
 
 	/*
 	 * New added offloadings for this queue are those not enabled in
-	 * rte_eth_dev_configure( ) and they must be per-queue type.
+	 * rte_eth_dev_configure() and they must be per-queue type.
 	 * A pure per-port offloading can't be enabled on a queue while
 	 * disabled on another queue. A pure per-port offloading can't
 	 * be enabled for any queue as new added one if it hasn't been
-	 * enabled in rte_eth_dev_configure( ).
+	 * enabled in rte_eth_dev_configure().
 	 */
 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
 	     local_conf.offloads) {
 		ethdev_log(ERR, "Ethdev port_id=%d tx_queue_id=%d, new "
 				"added offloads 0x%" PRIx64 " must be "
 				"within pre-queue offload capabilities 0x%"
-				PRIx64 " in %s( )\n",
+				PRIx64 " in %s()\n",
 				port_id,
 				tx_queue_id,
 				local_conf.offloads,
 				dev_info.tx_queue_offload_capa,
 				__func__);
+		/* Will return -EINVAL in the next release */
 	}
 
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index 78e12bf..6bef181 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -1067,13 +1067,13 @@ struct rte_eth_dev_info {
 	uint16_t max_vfs; /**< Maximum number of VFs. */
 	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
 	uint64_t rx_offload_capa;
-	/**< All RX offload capabilities including all per queue ones */
+	/**< All RX offload capabilities including all per-queue ones */
 	uint64_t tx_offload_capa;
-	/**< All TX offload capabilities.including all per-queue ones */
+	/**< All TX offload capabilities including all per-queue ones */
 	uint64_t rx_queue_offload_capa;
-	/**< Device per queue RX offload capabilities. */
+	/**< Device per-queue RX offload capabilities. */
 	uint64_t tx_queue_offload_capa;
-	/**< Device per queue TX offload capabilities. */
+	/**< Device per-queue TX offload capabilities. */
 	uint16_t reta_size;
 	/**< Device redirection table size, the total number of entries. */
 	uint8_t hash_key_size; /**< Hash key size in bytes */
@@ -1554,9 +1554,7 @@ const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);
  *        the [rt]x_offload_capa returned from rte_eth_dev_infos_get().
  *        Any type of device supported offloading set in the input argument
  *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
- *        on all [RT]x queues and it can't be disabled no matter whether
- *        it is cleared or set in the input argument [rt]x_conf->offloads
- *        to rte_eth_[rt]x_queue_setup().
+ *        on all queues and it can't be disabled in rte_eth_[rt]x_queue_setup().
  *     - the Receive Side Scaling (RSS) configuration when using multiple RX
  *         queues per port.
  *
@@ -1617,6 +1615,9 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   hasn't been set in the input argument eth_conf->rxmode.offloads
  *   to rte_eth_dev_configure(), it is a new added offloading, it must be
  *   per-queue type and it is enabled for the queue.
+ *   No need to repeat any bit in rx_conf->offloads which has already been
+ *   enabled in rte_eth_dev_configure() at port level. An offloading enabled
+ *   at port level can't be disabled at queue level.
  * @param mb_pool
  *   The pointer to the memory pool from which to allocate *rte_mbuf* network
  *   memory buffers to populate each descriptor of the receive ring.
@@ -1679,6 +1680,9 @@ int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
  *     hasn't been set in the input argument eth_conf->txmode.offloads
  *     to rte_eth_dev_configure(), it is a new added offloading, it must be
  *     per-queue type and it is enabled for the queue.
+ *     No need to repeat any bit in tx_conf->offloads which has already been
+ *     enabled in rte_eth_dev_configure() at port level. An offloading enabled
+ *     at port level can't be disabled at queue level.
  *
  *     Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
  *     the transmit function to use default values.
-- 
2.7.5

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v13] ethdev: new Rx/Tx offloads API
  2018-05-14 12:54                       ` Thomas Monjalon
@ 2018-05-14 13:26                         ` Dai, Wei
  0 siblings, 0 replies; 60+ messages in thread
From: Dai, Wei @ 2018-05-14 13:26 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: dev, Yigit, Ferruh, Zhang, Qi Z

Hi, Thomas
Thanks for your quick feedback.

> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Monday, May 14, 2018 8:54 PM
> To: Dai, Wei <wei.dai@intel.com>
> Cc: dev@dpdk.org; Yigit, Ferruh <ferruh.yigit@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v13] ethdev: new Rx/Tx offloads API
> 
> 14/05/2018 14:00, Wei Dai:
> > --- a/doc/guides/prog_guide/poll_mode_drv.rst
> > +++ b/doc/guides/prog_guide/poll_mode_drv.rst
> > @@ -303,12 +303,12 @@ In the DPDK offload API, offloads are divided
> > into per-port and per-queue offloa
> >  * A pure per-port offloading can't be enabled on a queue and disabled on
> another queue at the same time.
> >  * A pure per-port offloading must be enabled or disabled on all queues at
> the same time.
> >  * Any offloading is per-queue or pure per-port type, but can't be both
> types at same devices.
> > -* A per-port offloading can be enabled or disabled on all queues at the
> same time.
> > -* It is certain that both per-queue and pure per-port offloading are
> per-port type.
> > +* Port capabilities = pre-queue capabilities + pure per-port capabilities.
> 
> s/pre/per/
Sorry for the typo error.

> 
> > +* Any supported offloading can be enabled on all queues.
> >
> >  The different offloads capabilities can be queried using
> ``rte_eth_dev_info_get()``.
> >  The ``dev_info->[rt]x_queue_offload_capa`` returned from
> ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
> > -The ``dev_info->[rt]x_offload_capa`` returned from
> ``rte_eth_dev_info_get()`` includes all per-port and per-queue offloading
> capabilities.
> > +The ``dev_info->[rt]x_offload_capa`` returned from
> ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue
> offloading capabilities.
> 
> OK
> 
> 
> > @@ -1556,29 +1558,29 @@ rte_eth_rx_queue_setup(uint16_t port_id,
> uint16_t rx_queue_id,
> >  	 * The local_conf.offloads input to underlying PMD only carries
> >  	 * those offloadings which are only enabled on this queue and
> >  	 * not enabled on all queues.
> > -	 * The underlying PMD must be aware of this point.
> >  	 */
> 
> OK
> 
> 
> > --- a/lib/librte_ethdev/rte_ethdev.h
> > +++ b/lib/librte_ethdev/rte_ethdev.h
> > @@ -1067,13 +1067,18 @@ struct rte_eth_dev_info {
> >  	uint16_t max_vfs; /**< Maximum number of VFs. */
> >  	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
> >  	uint64_t rx_offload_capa;
> > -	/**< All RX offload capabilities including all per queue ones */
> > +	/**<
> > +	 * All RX offload capabilities including all per-queue ones.
> > +	 * Any flag in [rt]x_offload_capa and [rt]x_queue_offload_capa
> > +	 * of this structure needn't be repeated in rte_eth_[rt]x_queue_setup().
> 
> It is confusing. Better to remove this sentence about queue_setup in port
> capa comment.
> 
> > +	 * A flag enabled at port level can't be disabled at queue level.
> 
> This one too: it is a comment about port capa, not queue setup.
> 
Sorry, I think I have a mistake about your feedback on v12.
Will remove above 2 sentences.

> 
> > @@ -1554,9 +1559,7 @@ const char * __rte_experimental
> rte_eth_dev_tx_offload_name(uint64_t offload);
> >   *        the [rt]x_offload_capa returned from
> rte_eth_dev_infos_get().
> >   *        Any type of device supported offloading set in the input
> argument
> >   *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is
> enabled
> > - *        on all [RT]x queues and it can't be disabled no matter whether
> > - *        it is cleared or set in the input argument [rt]x_conf->offloads
> > - *        to rte_eth_[rt]x_queue_setup().
> > + *        on all queues and it can't be disabled in
> rte_eth_[rt]x_queue_setup().
> 
> OK
> 
> 
> Missing: we must explain the "no repeat need" and "no disable port offload
> on queue" constraint.
> In the last review, I was suggesting such sentences:
>         No need to repeat flags already enabled at port level.
>         A flag enabled at port level, cannot be disabled at queue level.
> I think it should go in queue setup comments.
> 
> Opinion?
> 
Will add this in the comments of queue_setup( ) in rte_ethdev.h

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v14] ethdev: new Rx/Tx offloads API
  2018-05-14 13:20                       ` [dpdk-dev] [PATCH v14] " Wei Dai
@ 2018-05-14 14:11                         ` Thomas Monjalon
  2018-05-14 14:46                           ` Ferruh Yigit
  0 siblings, 1 reply; 60+ messages in thread
From: Thomas Monjalon @ 2018-05-14 14:11 UTC (permalink / raw)
  To: Wei Dai; +Cc: dev, ferruh.yigit, Qi Zhang

14/05/2018 15:20, Wei Dai:
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>

[...]
>   *   hasn't been set in the input argument eth_conf->rxmode.offloads
>   *   to rte_eth_dev_configure(), it is a new added offloading, it must be
>   *   per-queue type and it is enabled for the queue.
> + *   No need to repeat any bit in rx_conf->offloads which has already been
> + *   enabled in rte_eth_dev_configure() at port level. An offloading enabled
> + *   at port level can't be disabled at queue level.

OK

Acked-by: Thomas Monjalon <thomas@monjalon.net>

Thanks a lot Wei!

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [dpdk-dev] [PATCH v14] ethdev: new Rx/Tx offloads API
  2018-05-14 14:11                         ` Thomas Monjalon
@ 2018-05-14 14:46                           ` Ferruh Yigit
  0 siblings, 0 replies; 60+ messages in thread
From: Ferruh Yigit @ 2018-05-14 14:46 UTC (permalink / raw)
  To: Thomas Monjalon, Wei Dai; +Cc: dev, Qi Zhang

On 5/14/2018 3:11 PM, Thomas Monjalon wrote:
> 14/05/2018 15:20, Wei Dai:
>> Signed-off-by: Wei Dai <wei.dai@intel.com>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> 
> [...]
>>   *   hasn't been set in the input argument eth_conf->rxmode.offloads
>>   *   to rte_eth_dev_configure(), it is a new added offloading, it must be
>>   *   per-queue type and it is enabled for the queue.
>> + *   No need to repeat any bit in rx_conf->offloads which has already been
>> + *   enabled in rte_eth_dev_configure() at port level. An offloading enabled
>> + *   at port level can't be disabled at queue level.
> 
> OK
> 
> Acked-by: Thomas Monjalon <thomas@monjalon.net>

Squashed into relevant commit in next-net, thanks.

> 
> Thanks a lot Wei!
> 
> 

^ permalink raw reply	[flat|nested] 60+ messages in thread

end of thread, other threads:[~2018-05-14 14:46 UTC | newest]

Thread overview: 60+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-02-01 13:53 [dpdk-dev] [PATCH] ethdev: check consistency of per port offloads Wei Dai
2018-03-28  8:57 ` [dpdk-dev] [PATCH v2] ethdev: check Rx/Tx offloads Wei Dai
2018-04-13 17:31   ` Ferruh Yigit
2018-04-15 10:37     ` Thomas Monjalon
2018-04-16  3:06       ` Dai, Wei
2018-04-25 11:26   ` [dpdk-dev] [PATCH] " Wei Dai
2018-04-25 11:31   ` [dpdk-dev] [PATCH v3] " Wei Dai
2018-04-25 11:49     ` Wei Dai
2018-04-25 11:50   ` [dpdk-dev] [PATCH v4] " Wei Dai
2018-04-25 17:04     ` Ferruh Yigit
2018-04-26  7:59       ` Zhang, Qi Z
2018-04-26  8:18         ` Thomas Monjalon
2018-04-26  8:51           ` Zhang, Qi Z
2018-04-26 14:45             ` Dai, Wei
2018-04-26 14:37     ` [dpdk-dev] [PATCH v5] " Wei Dai
2018-04-26 15:50       ` Ferruh Yigit
2018-04-26 15:56         ` Thomas Monjalon
2018-04-26 15:59           ` Ferruh Yigit
2018-04-26 16:11         ` Ferruh Yigit
2018-05-03  1:30       ` [dpdk-dev] [PATCH v6] " Wei Dai
2018-05-04 11:12         ` Ferruh Yigit
2018-05-04 14:02         ` [dpdk-dev] [PATCH v7] " Wei Dai
2018-05-04 14:42           ` Ferruh Yigit
2018-05-04 14:45             ` Ferruh Yigit
2018-05-05 18:59           ` Shahaf Shuler
2018-05-07  7:15             ` Dai, Wei
2018-05-08 10:58             ` Ferruh Yigit
2018-05-08 10:05           ` [dpdk-dev] [PATCH v8] " Wei Dai
2018-05-08 10:41             ` Andrew Rybchenko
2018-05-08 11:02               ` Ferruh Yigit
2018-05-08 11:22                 ` Andrew Rybchenko
2018-05-08 11:37             ` Andrew Rybchenko
2018-05-08 12:34               ` Dai, Wei
2018-05-08 12:12             ` Ferruh Yigit
2018-05-09 12:45               ` Dai, Wei
2018-05-10  0:49             ` [dpdk-dev] [PATCH v9] ethdev: new Rx/Tx offloads API Wei Dai
2018-05-10  0:56               ` [dpdk-dev] [PATCH v10] " Wei Dai
2018-05-10  1:28                 ` Ferruh Yigit
2018-05-10  2:35                 ` Thomas Monjalon
2018-05-10 11:27                   ` Dai, Wei
2018-05-10  9:25                 ` Andrew Rybchenko
2018-05-10 19:47                   ` Ferruh Yigit
2018-05-10 11:30                 ` [dpdk-dev] [PATCH v11] " Wei Dai
2018-05-10 11:56                   ` [dpdk-dev] [PATCH v12] " Wei Dai
2018-05-10 21:39                     ` Thomas Monjalon
2018-05-14  8:37                       ` Thomas Monjalon
2018-05-14 11:19                         ` Dai, Wei
2018-05-10 21:48                     ` Ferruh Yigit
2018-05-14 12:00                     ` [dpdk-dev] [PATCH v13] " Wei Dai
2018-05-14 12:54                       ` Thomas Monjalon
2018-05-14 13:26                         ` Dai, Wei
2018-05-14 13:20                       ` [dpdk-dev] [PATCH v14] " Wei Dai
2018-05-14 14:11                         ` Thomas Monjalon
2018-05-14 14:46                           ` Ferruh Yigit
2018-05-10 21:08                 ` [dpdk-dev] [PATCH v10] " Ferruh Yigit
2018-05-08 10:10           ` [dpdk-dev] [PATCH v8] ethdev: check Rx/Tx offloads Wei Dai
2018-05-08 17:51             ` Andrew Rybchenko
2018-05-09  2:10               ` Dai, Wei
2018-05-09 14:11               ` Ferruh Yigit
2018-05-09 22:40                 ` Ferruh Yigit

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).