DPDK patches and discussions
* [dpdk-dev] [PATCH 0/4] ixgbe: convert to new offloads API
@ 2018-02-27 16:01 Wei Dai
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
                   ` (4 more replies)
  0 siblings, 5 replies; 28+ messages in thread
From: Wei Dai @ 2018-02-27 16:01 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

This patch set adds support for per-queue VLAN strip offloading
in the ixgbe PF and VF.
It also converts the ixgbe PF and VF to the new offloads API.
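
For context, a minimal application-side sketch of the converted API
follows (not part of this patch set; port_id and mb_pool are assumed
to exist, and the queue/descriptor counts are illustrative; names
follow the 18.02-era ethdev API). The old rxmode bit-fields are
replaced by DEV_RX_OFFLOAD_* flags, ignore_offload_bitfield tells the
PMD to honor them, and per-queue offloads such as VLAN strip are
requested at Rx queue setup time:

    struct rte_eth_conf port_conf = { 0 };
    struct rte_eth_rxconf rxq_conf;
    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(port_id, &dev_info);

    /* Port-level offloads expressed as flags, not bit-fields. */
    port_conf.rxmode.ignore_offload_bitfield = 1;
    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
                                DEV_RX_OFFLOAD_CRC_STRIP;
    rte_eth_dev_configure(port_id, 2, 2, &port_conf);

    /* Queue-level offloads: request VLAN strip on queue 0 only. */
    rxq_conf = dev_info.default_rxconf;
    rxq_conf.offloads = port_conf.rxmode.offloads |
                        DEV_RX_OFFLOAD_VLAN_STRIP;
    rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                           &rxq_conf, mb_pool);
    rxq_conf.offloads = port_conf.rxmode.offloads;
    rte_eth_rx_queue_setup(port_id, 1, 512, rte_socket_id(),
                           &rxq_conf, mb_pool);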

Wei Dai (4):
  net/ixgbe: support VLAN strip per queue offloading in PF
  net/ixgbe: support VLAN strip per queue offloading in VF
  net/ixgbe: convert to new Rx offloads API
  net/ixgbe: convert to new Tx offloads API

 drivers/net/ixgbe/ixgbe_ethdev.c          | 243 +++++++++++++-----------------
 drivers/net/ixgbe/ixgbe_ethdev.h          |   4 +-
 drivers/net/ixgbe/ixgbe_ipsec.c           |  13 +-
 drivers/net/ixgbe/ixgbe_pf.c              |   5 +-
 drivers/net/ixgbe/ixgbe_rxtx.c            | 209 ++++++++++++++++++++++---
 drivers/net/ixgbe/ixgbe_rxtx.h            |  12 ++
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
 8 files changed, 318 insertions(+), 172 deletions(-)

-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH 1/4] net/ixgbe: support VLAN strip per queue offloading in PF
  2018-02-27 16:01 [dpdk-dev] [PATCH 0/4] ixgbe: convert to new offloads API Wei Dai
@ 2018-02-27 16:01 ` Wei Dai
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-02-27 16:01 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

VLAN stripping is a per-queue offload in the PF. With this patch
it can be enabled or disabled on any Rx queue of the PF.
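
For illustration, a per-queue request under the new API could look as
below (sketch only; port_id, nb_desc, socket_id, rxq_conf and pool are
assumed to be set up by the application):

    /* Strip VLAN tags on queue 0, leave them in place on queue 1. */
    rxq_conf.offloads = DEV_RX_OFFLOAD_VLAN_STRIP;
    rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket_id,
                           &rxq_conf, pool);
    rxq_conf.offloads = 0;
    rte_eth_rx_queue_setup(port_id, 1, nb_desc, socket_id,
                           &rxq_conf, pool);

Note that 82598EB hardware only supports port-wide stripping through
VLNCTRL, so the per-queue setting applies to the later NIC families
that expose RXDCTL.VME, as the code below reflects.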

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 109 +++++++++++++++++----------------------
 drivers/net/ixgbe/ixgbe_ethdev.h |   4 +-
 drivers/net/ixgbe/ixgbe_pf.c     |   5 +-
 drivers/net/ixgbe/ixgbe_rxtx.c   |   1 +
 drivers/net/ixgbe/ixgbe_rxtx.h   |   1 +
 5 files changed, 51 insertions(+), 69 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4483258..73755d2 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2001,64 +2001,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
 }
 
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-	struct ixgbe_rx_queue *rxq;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl &= ~IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	} else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			ctrl &= ~IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-			/* record those setting for HW strip per queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
-		}
-	}
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-	struct ixgbe_rx_queue *rxq;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl |= IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	} else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-			/* record those setting for HW strip per queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
-		}
-	}
-}
-
 static void
 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 {
@@ -2114,14 +2056,57 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	 */
 }
 
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	uint32_t ctrl;
+	uint16_t i;
+	struct ixgbe_rx_queue *rxq;
+	bool on;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			ctrl |= IXGBE_VLNCTRL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+		} else {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			ctrl &= ~IXGBE_VLNCTRL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+		}
+	} else {
+		/*
+		 * On other 10G NICs, VLAN stripping can be set up
+		 * per queue via RXDCTL.
+		 */
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+				ctrl |= IXGBE_RXDCTL_VME;
+				on = TRUE;
+			} else {
+				ctrl &= ~IXGBE_RXDCTL_VME;
+				on = FALSE;
+			}
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+			/* record the per-queue HW strip setting */
+			ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+		}
+	}
+}
+
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-			ixgbe_vlan_hw_strip_enable_all(dev);
-		else
-			ixgbe_vlan_hw_strip_disable_all(dev);
+		ixgbe_vlan_hw_strip_config(dev);
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c56d652..6550777 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -659,9 +659,7 @@ void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
 
 void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
 
-void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
+void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
 
 void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
 
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index ea99737..4e61310 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -329,10 +329,7 @@ set_rx_mode(struct rte_eth_dev *dev)
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-		ixgbe_vlan_hw_strip_enable_all(dev);
-	else
-		ixgbe_vlan_hw_strip_disable_all(dev);
+	ixgbe_vlan_hw_strip_config(dev);
 }
 
 static inline void
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 6c582b4..5c45eb4 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2820,6 +2820,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 							0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = rx_conf->offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 69c718b..ab5f01e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -129,6 +129,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
+	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH 2/4] net/ixgbe: support VLAN strip per queue offloading in VF
  2018-02-27 16:01 [dpdk-dev] [PATCH 0/4] ixgbe: convert to new offloads API Wei Dai
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
@ 2018-02-27 16:01 ` Wei Dai
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-02-27 16:01 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

VLAN stripping is a per-queue offload in the VF. With this patch
it can be enabled or disabled on any Rx queue of the VF.
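
Once the per-queue offload is recorded, stripping can also be toggled
at run time through the generic ethdev call, which maps to
ixgbevf_vlan_strip_queue_set() in this driver. A sketch (vf_port is
assumed to be a started ixgbe VF port):

    int ret;

    /* Enable stripping on queue 0, disable it on queue 1. */
    ret = rte_eth_dev_set_vlan_strip_on_queue(vf_port, 0, 1);
    if (ret != 0)
        printf("queue 0: cannot enable VLAN strip (%d)\n", ret);
    ret = rte_eth_dev_set_vlan_strip_on_queue(vf_port, 1, 0);
    if (ret != 0)
        printf("queue 1: cannot disable VLAN strip (%d)\n", ret);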

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 73755d2..8bb67ba 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5215,15 +5215,17 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_rx_queue *rxq;
 	uint16_t i;
 	int on = 0;
 
 	/* VF function only support hw strip feature, others are not support */
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
-		for (i = 0; i < hw->mac.max_rx_queues; i++)
+		for (i = 0; i < hw->mac.max_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
+		}
 	}
 
 	return 0;
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH 3/4] net/ixgbe: convert to new Rx offloads API
  2018-02-27 16:01 [dpdk-dev] [PATCH 0/4] ixgbe: convert to new offloads API Wei Dai
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
@ 2018-02-27 16:01 ` Wei Dai
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx " Wei Dai
  2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-02-27 16:01 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

The ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
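
For applications being converted, the rough mapping from the removed
rxmode bit-fields to the new flags is (sketch; port_conf is the
application's struct rte_eth_conf):

    /* Old bit-field style, dropped by this conversion: */
    port_conf.rxmode.hw_ip_checksum = 1;
    port_conf.rxmode.hw_strip_crc = 1;
    port_conf.rxmode.jumbo_frame = 1;
    port_conf.rxmode.enable_scatter = 1;

    /* New flag style: */
    port_conf.rxmode.ignore_offload_bitfield = 1;
    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
                                DEV_RX_OFFLOAD_CRC_STRIP |
                                DEV_RX_OFFLOAD_JUMBO_FRAME |
                                DEV_RX_OFFLOAD_SCATTER;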

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c          |  88 +++++++++---------
 drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
 drivers/net/ixgbe/ixgbe_rxtx.c            | 143 ++++++++++++++++++++++++++----
 drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
 6 files changed, 180 insertions(+), 66 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 8bb67ba..b9a23eb 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		ixgbe_vlan_hw_strip_config(dev);
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2353,6 +2356,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 	adapter->rx_bulk_alloc_allowed = true;
 	adapter->rx_vec_allowed = true;
 
+	/*
+	 * Header split and VLAN strip are per-queue offload features;
+	 * clear them here and set them again if they are enabled on
+	 * any Rx queue. This is for set_rx_function(), which is called
+	 * later.
+	 */
+	if (dev->data->dev_conf.rxmode.ignore_offload_bitfield)
+		dev->data->dev_conf.rxmode.offloads &=
+			~(ixgbe_get_rx_queue_offloads(dev));
+
 	return 0;
 }
 
@@ -3632,30 +3644,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_CRC_STRIP;
-
-	/*
-	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
-	 * mode.
-	 */
-	if ((hw->mac.type == ixgbe_mac_82599EB ||
-	     hw->mac.type == ixgbe_mac_X540) &&
-	    !RTE_ETH_DEV_SRIOV(dev).active)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -3675,10 +3666,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	if (dev->security_ctx)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	}
 #endif
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -3689,6 +3678,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3781,11 +3771,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		dev_info->max_vmdq_pools = ETH_16_POOLS;
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM  |
-				DEV_RX_OFFLOAD_CRC_STRIP;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM  |
 				DEV_TX_OFFLOAD_UDP_CKSUM   |
@@ -3801,6 +3789,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -4894,10 +4883,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4955,14 +4946,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif
 
@@ -4973,6 +4964,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	adapter->rx_bulk_alloc_allowed = true;
 	adapter->rx_vec_allowed = true;
 
+	/*
+	 * Header split and VLAN strip are per-queue offload features;
+	 * clear them here and set them again if they are enabled on
+	 * any Rx queue. This is for set_rx_function(), which is called
+	 * later.
+	 */
+	if (conf->rxmode.ignore_offload_bitfield)
+		conf->rxmode.offloads &= ~(ixgbe_get_rx_queue_offloads(dev));
+
 	return 0;
 }
 
@@ -5850,6 +5849,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 			   uint16_t queue_idx, uint16_t tx_rate)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rf_dec, rf_int;
 	uint32_t bcnrc_val;
 	uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5871,14 +5871,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 		bcnrc_val = 0;
 	}
 
+	rxmode = &dev->data->dev_conf.rxmode;
 	/*
 	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
 	 * set as 0x4.
 	 */
-	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
-				IXGBE_MAX_JUMBO_FRAME_SIZE))
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
 			IXGBE_MMW_SIZE_JUMBO_FRAME);
 	else
@@ -6225,7 +6225,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* refuse mtu that requires the support of scattered packets when this
 	 * feature has not been enabled before.
 	 */
-	if (!rx_conf->enable_scatter &&
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
 	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
 		return -EINVAL;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0f..29e4728 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
+	uint64_t rx_offloads;
 
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	/* sanity checks */
-	if (dev->data->dev_conf.rxmode.enable_lro) {
+	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+	if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5c45eb4..2b4864b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2769,6 +2769,91 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 #endif
 }
 
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599_vf:
+	case ixgbe_mac_X540_vf:
+	case ixgbe_mac_X550_vf:
+	case ixgbe_mac_X550EM_x_vf:
+	case ixgbe_mac_X550EM_a_vf:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;
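+	/*
+	 * 82598EB supports VLAN stripping only port-wide (VLNCTRL), so
+	 * on that family VLAN strip is a port-level rather than a
+	 * queue-level offload.
+	 */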
+	if (hw->mac.type != ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		   DEV_RX_OFFLOAD_UDP_CKSUM   |
+		   DEV_RX_OFFLOAD_TCP_CKSUM   |
+		   DEV_RX_OFFLOAD_CRC_STRIP   |
+		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+		   DEV_RX_OFFLOAD_SCATTER;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
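+	/* VLAN filtering and VLAN extend are available only on the PF. */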
+	if (ixgbe_is_vf(dev) == 0)
+		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+	/*
+	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+	 * mode.
+	 */
+	if ((hw->mac.type == ixgbe_mac_82599EB ||
+	     hw->mac.type == ixgbe_mac_X540) &&
+	    !RTE_ETH_DEV_SRIOV(dev).active)
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+	return offloads;
+}
+
+static int
+ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+	uint64_t supported = ixgbe_get_rx_port_offloads(dev);
+
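+	/*
+	 * Any port-level offload the port was configured with is
+	 * mandatory on every queue, so within the port-supported set
+	 * the per-queue request must equal the port configuration.
+	 * Pure queue-level offloads (header split, and VLAN strip on
+	 * non-82598 NICs) are outside 'supported' and may differ per
+	 * queue.
+	 */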
+	return !((mandatory ^ requested) & supported);
+}
+
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2787,6 +2872,17 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			(ixgbe_get_rx_port_offloads(dev) |
+			 ixgbe_get_rx_queue_offloads(dev)));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -2816,8 +2912,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 	rxq->offloads = rx_conf->offloads;
@@ -4575,7 +4671,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && rx_conf->enable_lro) {
+	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4583,7 +4679,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4597,7 +4694,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration  */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->enable_lro))
+	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		/*
 		 * Since NFS packets coalescing is not supported - clear
 		 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4610,7 +4707,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!rx_conf->enable_lro)
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -4730,7 +4827,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->hw_strip_crc)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -4738,7 +4835,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->jumbo_frame == 1) {
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 		maxfrs &= 0x0000FFFF;
@@ -4766,7 +4863,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+		rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+				0 : ETHER_CRC_LEN;
 
 		/* Setup the Base and Length of the Rx Descriptor Rings */
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -4784,7 +4882,9 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure Header Split
 		 */
-		if (rx_conf->header_split) {
+		if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+			/* add Header Split flag for set_rx_function() */
+			rx_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
 			if (hw->mac.type == ixgbe_mac_82599EB) {
 				/* Must setup the PSRTYPE register */
 				uint32_t psrtype;
@@ -4827,9 +4927,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->enable_scatter)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4844,7 +4946,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->hw_ip_checksum)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4854,7 +4956,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->hw_strip_crc)
+		if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -5260,6 +5362,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -5290,6 +5393,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
 	struct ixgbe_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint64_t bus_addr;
 	uint32_t srrctl, psrtype = 0;
 	uint16_t buf_size;
@@ -5356,7 +5460,9 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure Header Split
 		 */
-		if (dev->data->dev_conf.rxmode.header_split) {
+		if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+			/* add Header Split flag for set_rx_function() */
+			rxmode->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
 			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
 				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 				IXGBE_SRRCTL_BSIZEHDR_MASK);
@@ -5388,18 +5494,21 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (dev->data->dev_conf.rxmode.enable_scatter ||
+		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
-		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+		    (rxmode->max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
 		}
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 #ifdef RTE_HEADER_SPLIT_ENABLE
-	if (dev->data->dev_conf.rxmode.header_split)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
 		/* Must setup the PSRTYPE register */
 		psrtype = IXGBE_PSRTYPE_TCPHDR |
 			IXGBE_PSRTYPE_UDPHDR   |
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ab5f01e..30095fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -307,5 +307,8 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+
 #endif /* RTE_IXGBE_INC_VECTOR */
 #endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a..d3eb060 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -286,7 +286,7 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->header_split == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998..edb1383 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
 	/* no csum error report support */
-	if (rxmode->hw_ip_checksum == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		return -1;
 
 	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx offloads API
  2018-02-27 16:01 [dpdk-dev] [PATCH 0/4] ixgbe: convert to new offloads API Wei Dai
                   ` (2 preceding siblings ...)
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
@ 2018-02-27 16:01 ` Wei Dai
  2018-03-14 23:18   ` Ananyev, Konstantin
  2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
  4 siblings, 1 reply; 28+ messages in thread
From: Wei Dai @ 2018-02-27 16:01 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

The ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
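
A converted application requests Tx offloads roughly as below (sketch;
port_conf, dev_info and port_id are as in the Rx patches, with names
from the 18.02-era ethdev API):

    struct rte_eth_txconf txq_conf;

    /* Port-level Tx offloads via flags. */
    port_conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
                                DEV_TX_OFFLOAD_TCP_CKSUM;
    rte_eth_dev_configure(port_id, 1, 1, &port_conf);

    /* ETH_TXQ_FLAGS_IGNORE switches the PMD to the new queue API. */
    txq_conf = dev_info.default_txconf;
    txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
    txq_conf.offloads = port_conf.txmode.offloads;
    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
                           &txq_conf);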

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 40 +++++++------------------
 drivers/net/ixgbe/ixgbe_ipsec.c  |  5 +++-
 drivers/net/ixgbe/ixgbe_rxtx.c   | 65 +++++++++++++++++++++++++++++++++++++---
 drivers/net/ixgbe/ixgbe_rxtx.h   |  8 +++++
 4 files changed, 83 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index b9a23eb..1f4881e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -3647,28 +3647,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-#endif
+	dev_info->tx_queue_offload_capa = 0;
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3690,7 +3670,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -3774,12 +3756,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = 0;
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3801,7 +3779,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 29e4728..de7ed36 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -599,8 +599,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+
 	/* sanity checks */
 	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
@@ -634,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2b4864b..45b5db6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+	if (((txq->offloads & IXGBE_SIMPLE_TX_OFFLOAD_FLAGS) == 0) &&
 #ifdef RTE_LIBRTE_SECURITY
 			!(txq->using_ipsec) &&
 #endif
@@ -2398,9 +2398,10 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	} else {
 		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
 		PMD_INIT_LOG(DEBUG,
-				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
-				(unsigned long)txq->txq_flags,
-				(unsigned long)IXGBE_SIMPLE_FLAGS);
+				" - offloads = 0x%" PRIx64
+				" [IXGBE_SIMPLE_TX_OFFLOAD_FLAGS=0x%" PRIx64 "]",
+				txq->offloads,
+				IXGBE_SIMPLE_TX_OFFLOAD_FLAGS);
 		PMD_INIT_LOG(DEBUG,
 				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
 				(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2411,45 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	}
 }
 
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM   |
+		DEV_TX_OFFLOAD_SCTP_CKSUM  |
+		DEV_TX_OFFLOAD_TCP_TSO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+	return tx_offload_capa;
+}
+
+static int
+ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+	uint64_t supported = ixgbe_get_tx_port_offloads(dev);
+
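+	/*
+	 * Same rule as on the Rx side: port-level Tx offloads are
+	 * mandatory on every queue, and ixgbe has no queue-only Tx
+	 * offloads (tx_queue_offload_capa is 0), so the per-queue
+	 * request must equal the port configuration.
+	 */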
+	return !((mandatory ^ requested) & supported);
+}
+
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2426,6 +2466,21 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
+	 * Don't verify port offloads for applications which
+	 * use the old API.
+	 */
+	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+	    !ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64,
+			(void *)dev, tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			ixgbe_get_tx_port_offloads(dev));
+		return -ENOTSUP;
+	}
+
+	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
@@ -2551,6 +2606,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
+	txq->offloads = tx_conf->offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -5382,6 +5438,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
 	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.offloads = txq->offloads;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 30095fa..d7f0535 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
 	uint32_t txq_flags; /**< Holds flags for this TXq */
+	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -254,6 +255,12 @@ struct ixgbe_txq_ops {
 #define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
 			    ETH_TXQ_FLAGS_NOOFFLOADS)
 
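+/*
+ * Requesting any of these Tx offloads disqualifies a queue from the
+ * simple (no offload, no multi-seg) Tx code path; see
+ * ixgbe_set_tx_function().
+ */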
+#define IXGBE_SIMPLE_TX_OFFLOAD_FLAGS ((uint64_t)DEV_TX_OFFLOAD_MULTI_SEGS |\
+					DEV_TX_OFFLOAD_VLAN_INSERT |\
+					DEV_TX_OFFLOAD_SCTP_CKSUM |\
+					DEV_TX_OFFLOAD_UDP_CKSUM |\
+					DEV_TX_OFFLOAD_TCP_CKSUM)
+
 /*
  * Populate descriptors with the following info:
  * 1.) buffer_addr = phys_addr + headroom
@@ -307,6 +314,7 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
 
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new offloads API
  2018-02-27 16:01 [dpdk-dev] [PATCH 0/4] ixgbe: convert to new offloads API Wei Dai
                   ` (3 preceding siblings ...)
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx " Wei Dai
@ 2018-03-07 13:06 ` Wei Dai
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
                     ` (4 more replies)
  4 siblings, 5 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-07 13:06 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

This patch set adds support for per-queue VLAN strip offloading
in the ixgbe PF and VF.
It also converts the ixgbe PF and VF to the new offloads API.

---
v2: improve error checking
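
With the improved checking, an unsupported port-level offload is now
rejected at configure time instead of being silently ignored. A sketch
of the effect (vf_port is assumed to be an ixgbe VF port, on which TCP
LRO is not advertised):

    port_conf.rxmode.ignore_offload_bitfield = 1;
    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO;
    ret = rte_eth_dev_configure(vf_port, 1, 1, &port_conf);
    /* ret == -ENOTSUP; the PMD logs requested vs. supported masks */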

Wei Dai (4):
  net/ixgbe: support VLAN strip per queue offloading in PF
  net/ixgbe: support VLAN strip per queue offloading in VF
  net/ixgbe: convert to new Rx offloads API
  net/ixgbe: convert to new Tx offloads API

 drivers/net/ixgbe/ixgbe_ethdev.c          | 264 ++++++++++++++----------------
 drivers/net/ixgbe/ixgbe_ethdev.h          |   4 +-
 drivers/net/ixgbe/ixgbe_ipsec.c           |  13 +-
 drivers/net/ixgbe/ixgbe_pf.c              |   5 +-
 drivers/net/ixgbe/ixgbe_rxtx.c            | 245 ++++++++++++++++++++++++---
 drivers/net/ixgbe/ixgbe_rxtx.h            |  13 ++
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
 8 files changed, 376 insertions(+), 172 deletions(-)

-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v2 1/4] net/ixgbe: support VLAN strip per queue offloading in PF
  2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
@ 2018-03-07 13:06   ` Wei Dai
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-07 13:06 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

VLAN stripping is a per-queue offload in the PF. With this patch
it can be enabled or disabled on any Rx queue of the PF.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 109 +++++++++++++++++----------------------
 drivers/net/ixgbe/ixgbe_ethdev.h |   4 +-
 drivers/net/ixgbe/ixgbe_pf.c     |   5 +-
 drivers/net/ixgbe/ixgbe_rxtx.c   |   1 +
 drivers/net/ixgbe/ixgbe_rxtx.h   |   1 +
 5 files changed, 51 insertions(+), 69 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4483258..73755d2 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2001,64 +2001,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
 }
 
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-	struct ixgbe_rx_queue *rxq;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl &= ~IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	} else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			ctrl &= ~IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-			/* record those setting for HW strip per queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
-		}
-	}
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-	struct ixgbe_rx_queue *rxq;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl |= IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	} else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-			/* record those setting for HW strip per queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
-		}
-	}
-}
-
 static void
 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 {
@@ -2114,14 +2056,57 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	 */
 }
 
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	uint32_t ctrl;
+	uint16_t i;
+	struct ixgbe_rx_queue *rxq;
+	bool on;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			ctrl |= IXGBE_VLNCTRL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+		} else {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			ctrl &= ~IXGBE_VLNCTRL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+		}
+	} else {
+		/*
+		 * On other 10G NICs, VLAN stripping can be set up
+		 * per queue via RXDCTL.
+		 */
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+				ctrl |= IXGBE_RXDCTL_VME;
+				on = TRUE;
+			} else {
+				ctrl &= ~IXGBE_RXDCTL_VME;
+				on = FALSE;
+			}
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+			/* record the per-queue HW strip setting */
+			ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+		}
+	}
+}
+
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-			ixgbe_vlan_hw_strip_enable_all(dev);
-		else
-			ixgbe_vlan_hw_strip_disable_all(dev);
+		ixgbe_vlan_hw_strip_config(dev);
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c56d652..6550777 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -659,9 +659,7 @@ void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
 
 void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
 
-void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
+void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
 
 void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
 
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index ea99737..4e61310 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -329,10 +329,7 @@ set_rx_mode(struct rte_eth_dev *dev)
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-		ixgbe_vlan_hw_strip_enable_all(dev);
-	else
-		ixgbe_vlan_hw_strip_disable_all(dev);
+	ixgbe_vlan_hw_strip_config(dev);
 }
 
 static inline void
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 6c582b4..5c45eb4 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2820,6 +2820,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 							0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = rx_conf->offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 69c718b..ab5f01e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -129,6 +129,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
+	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v2 2/4] net/ixgbe: support VLAN strip per queue offloading in VF
  2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
@ 2018-03-07 13:06   ` Wei Dai
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-07 13:06 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

VLAN stripping is a per-queue offload in the VF. With this patch
it can be enabled or disabled on any Rx queue of the VF.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 73755d2..8bb67ba 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5215,15 +5215,17 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_rx_queue *rxq;
 	uint16_t i;
 	int on = 0;
 
 	/* VF function only support hw strip feature, others are not support */
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
-		for (i = 0; i < hw->mac.max_rx_queues; i++)
+		for (i = 0; i < hw->mac.max_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
+		}
 	}
 
 	return 0;
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
  2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
@ 2018-03-07 13:06   ` Wei Dai
  2018-03-14 21:47     ` Ananyev, Konstantin
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 4/4] net/ixgbe: convert to new Tx " Wei Dai
  2018-03-19  7:04   ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
  4 siblings, 1 reply; 28+ messages in thread
From: Wei Dai @ 2018-03-07 13:06 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

The ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c          |  93 +++++++++--------
 drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
 drivers/net/ixgbe/ixgbe_rxtx.c            | 163 ++++++++++++++++++++++++++----
 drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
 6 files changed, 205 insertions(+), 66 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 8bb67ba..9437f05 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		ixgbe_vlan_hw_strip_config(dev);
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	ixgbe_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported, "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_CRC_STRIP;
-
-	/*
-	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
-	 * mode.
-	 */
-	if ((hw->mac.type == ixgbe_mac_82599EB ||
-	     hw->mac.type == ixgbe_mac_X540) &&
-	    !RTE_ETH_DEV_SRIOV(dev).active)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -3675,10 +3668,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	if (dev->security_ctx)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	}
 #endif
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		dev_info->max_vmdq_pools = ETH_16_POOLS;
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM  |
-				DEV_RX_OFFLOAD_CRC_STRIP;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM  |
 				DEV_TX_OFFLOAD_UDP_CKSUM   |
@@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
+	ixgbevf_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif
 
@@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 			   uint16_t queue_idx, uint16_t tx_rate)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rf_dec, rf_int;
 	uint32_t bcnrc_val;
 	uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 		bcnrc_val = 0;
 	}
 
+	rxmode = &dev->data->dev_conf.rxmode;
 	/*
 	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
 	 * set as 0x4.
 	 */
-	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
-				IXGBE_MAX_JUMBO_FRAME_SIZE))
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
 			IXGBE_MMW_SIZE_JUMBO_FRAME);
 	else
@@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* refuse mtu that requires the support of scattered packets when this
 	 * feature has not been enabled before.
 	 */
-	if (!rx_conf->enable_scatter &&
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
 	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
 		return -EINVAL;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0f..29e4728 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
+	uint64_t rx_offloads;
 
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	/* sanity checks */
-	if (dev->data->dev_conf.rxmode.enable_lro) {
+	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+	if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5c45eb4..a5d4822 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2769,6 +2769,98 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 #endif
 }
 
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599_vf:
+	case ixgbe_mac_X540_vf:
+	case ixgbe_mac_X550_vf:
+	case ixgbe_mac_X550EM_x_vf:
+	case ixgbe_mac_X550EM_a_vf:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;
+	if (hw->mac.type != ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		   DEV_RX_OFFLOAD_UDP_CKSUM   |
+		   DEV_RX_OFFLOAD_TCP_CKSUM   |
+		   DEV_RX_OFFLOAD_CRC_STRIP   |
+		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+		   DEV_RX_OFFLOAD_SCATTER;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	if (ixgbe_is_vf(dev) == 0)
+		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+	/*
+	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+	 * mode.
+	 */
+	if ((hw->mac.type == ixgbe_mac_82599EB ||
+	     hw->mac.type == ixgbe_mac_X540) &&
+	    !RTE_ETH_DEV_SRIOV(dev).active)
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+	return offloads;
+}
+
+static int
+ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
+	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2787,6 +2879,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			ixgbe_get_rx_port_offloads(dev),
+			ixgbe_get_rx_queue_offloads(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -2816,8 +2920,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 	rxq->offloads = rx_conf->offloads;
@@ -4575,7 +4679,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && rx_conf->enable_lro) {
+	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4583,7 +4687,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4597,7 +4702,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration  */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->enable_lro))
+	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		/*
 		 * Since NFS packets coalescing is not supported - clear
 		 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4610,7 +4715,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!rx_conf->enable_lro)
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -4730,7 +4835,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->hw_strip_crc)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -4738,7 +4843,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->jumbo_frame == 1) {
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 		maxfrs &= 0x0000FFFF;
@@ -4758,6 +4863,12 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rx_conf->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+			       DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -4766,7 +4877,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+		rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+				0 : ETHER_CRC_LEN;
 
 		/* Setup the Base and Length of the Rx Descriptor Rings */
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -4784,7 +4896,9 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure Header Split
 		 */
-		if (rx_conf->header_split) {
+		if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+			/* add Header Split flag for set_rx_function() */
+			rx_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
 			if (hw->mac.type == ixgbe_mac_82599EB) {
 				/* Must setup the PSRTYPE register */
 				uint32_t psrtype;
@@ -4827,9 +4941,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->enable_scatter)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4844,7 +4960,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->hw_ip_checksum)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4854,7 +4970,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->hw_strip_crc)
+		if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -5260,6 +5376,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -5290,6 +5407,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
 	struct ixgbe_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint64_t bus_addr;
 	uint32_t srrctl, psrtype = 0;
 	uint16_t buf_size;
@@ -5329,6 +5447,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	ixgbevf_rlpml_set_vf(hw,
 		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rxmode->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+			      DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5356,7 +5480,9 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure Header Split
 		 */
-		if (dev->data->dev_conf.rxmode.header_split) {
+		if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+			/* add Header Split flag for set_rx_function() */
+			rxmode->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
 			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
 				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 				IXGBE_SRRCTL_BSIZEHDR_MASK);
@@ -5388,18 +5514,21 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (dev->data->dev_conf.rxmode.enable_scatter ||
+		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
-		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+		    (rxmode->max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
 		}
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 #ifdef RTE_HEADER_SPLIT_ENABLE
-	if (dev->data->dev_conf.rxmode.header_split)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
 		/* Must setup the PSRTYPE register */
 		psrtype = IXGBE_PSRTYPE_TCPHDR |
 			IXGBE_PSRTYPE_UDPHDR   |
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ab5f01e..30095fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -307,5 +307,8 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+
 #endif /* RTE_IXGBE_INC_VECTOR */
 #endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a..d3eb060 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -286,7 +286,7 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->header_split == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998..edb1383 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
 	/* no csum error report support */
-	if (rxmode->hw_ip_checksum == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		return -1;
 
 	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v2 4/4] net/ixgbe: convert to new Tx offloads API
  2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
                     ` (2 preceding siblings ...)
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
@ 2018-03-07 13:06   ` Wei Dai
  2018-03-19  7:04   ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-07 13:06 UTC (permalink / raw)
  To: wenzhuo.lu, konstantin.ananyev; +Cc: dev, Wei Dai

Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 56 +++++++++++++--------------
 drivers/net/ixgbe/ixgbe_ipsec.c  |  5 ++-
 drivers/net/ixgbe/ixgbe_rxtx.c   | 81 ++++++++++++++++++++++++++++++++++++++--
 drivers/net/ixgbe/ixgbe_rxtx.h   |  9 +++++
 4 files changed, 116 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 9437f05..6288690 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2337,6 +2337,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2356,6 +2357,13 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3649,28 +3657,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-#endif
+	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3692,7 +3680,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -3776,12 +3766,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3803,7 +3789,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -4941,6 +4929,7 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 			(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
@@ -4953,6 +4942,13 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 29e4728..de7ed36 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -599,8 +599,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+
 	/* sanity checks */
 	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
@@ -634,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a5d4822..8103b3b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+	if (((txq->offloads & IXGBE_SIMPLE_TX_OFFLOAD_FLAGS) == 0) &&
 #ifdef RTE_LIBRTE_SECURITY
 			!(txq->using_ipsec) &&
 #endif
@@ -2398,9 +2398,10 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	} else {
 		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
 		PMD_INIT_LOG(DEBUG,
-				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
-				(unsigned long)txq->txq_flags,
-				(unsigned long)IXGBE_SIMPLE_FLAGS);
+				" - offloads = 0x%" PRIx64
+				" [IXGBE_SIMPLE_TX_OFFLOAD_FLAGS=0x%" PRIx64 "]",
+				txq->offloads,
+				IXGBE_SIMPLE_TX_OFFLOAD_FLAGS);
 		PMD_INIT_LOG(DEBUG,
 				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
 				(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2411,60 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	}
 }
 
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM   |
+		DEV_TX_OFFLOAD_SCTP_CKSUM  |
+		DEV_TX_OFFLOAD_TCP_TSO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+	return tx_offload_capa;
+}
+
+static int
+ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
+	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2426,6 +2481,22 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
+	 * Don't verify port offloads for applications which
+	 * use the old API.
+	 */
+	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64,
+			(void *)dev, tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			ixgbe_get_tx_queue_offloads(dev),
+			ixgbe_get_tx_port_offloads(dev));
+		return -ENOTSUP;
+	}
+
+	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
@@ -2551,6 +2622,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
+	txq->offloads = tx_conf->offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -5396,6 +5468,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
 	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.offloads = txq->offloads;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 30095fa..032dfb1 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
 	uint32_t txq_flags; /**< Holds flags for this TXq */
+	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -254,6 +255,12 @@ struct ixgbe_txq_ops {
 #define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
 			    ETH_TXQ_FLAGS_NOOFFLOADS)
 
+#define IXGBE_SIMPLE_TX_OFFLOAD_FLAGS ((uint64_t)DEV_TX_OFFLOAD_MULTI_SEGS |\
+					DEV_TX_OFFLOAD_VLAN_INSERT |\
+					DEV_TX_OFFLOAD_SCTP_CKSUM |\
+					DEV_TX_OFFLOAD_UDP_CKSUM |\
+					DEV_TX_OFFLOAD_TCP_CKSUM)
+
 /*
  * Populate descriptors with the following info:
  * 1.) buffer_addr = phys_addr + headroom
@@ -307,8 +314,10 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
 
 #endif /* RTE_IXGBE_INC_VECTOR */
 #endif /* _IXGBE_RXTX_H_ */
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
@ 2018-03-14 21:47     ` Ananyev, Konstantin
  2018-03-19  3:15       ` Dai, Wei
  0 siblings, 1 reply; 28+ messages in thread
From: Ananyev, Konstantin @ 2018-03-14 21:47 UTC (permalink / raw)
  To: Dai, Wei, Lu, Wenzhuo; +Cc: dev

Hi Wei,

> -----Original Message-----
> From: Dai, Wei
> Sent: Wednesday, March 7, 2018 1:06 PM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> Subject: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> 
> Ethdev Rx offloads API has changed since:
> commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
> This commit supports the new Rx offloads API.
> 
> Signed-off-by: Wei Dai <wei.dai@intel.com>
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c          |  93 +++++++++--------
>  drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
>  drivers/net/ixgbe/ixgbe_rxtx.c            | 163 ++++++++++++++++++++++++++----
>  drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
>  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
>  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
>  6 files changed, 205 insertions(+), 66 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 8bb67ba..9437f05 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
>  static int
>  ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
>  {
> +	struct rte_eth_rxmode *rxmode;
> +	rxmode = &dev->data->dev_conf.rxmode;
> +
>  	if (mask & ETH_VLAN_STRIP_MASK) {
>  		ixgbe_vlan_hw_strip_config(dev);
>  	}
> 
>  	if (mask & ETH_VLAN_FILTER_MASK) {
> -		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
> +		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
>  			ixgbe_vlan_hw_filter_enable(dev);
>  		else
>  			ixgbe_vlan_hw_filter_disable(dev);
>  	}
> 
>  	if (mask & ETH_VLAN_EXTEND_MASK) {
> -		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
> +		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
>  			ixgbe_vlan_hw_extend_enable(dev);
>  		else
>  			ixgbe_vlan_hw_extend_disable(dev);
> @@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
>  		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
>  	struct ixgbe_adapter *adapter =
>  		(struct ixgbe_adapter *)dev->data->dev_private;
> +	struct rte_eth_dev_info dev_info;
> +	uint64_t rx_offloads;
>  	int ret;
> 
>  	PMD_INIT_FUNC_TRACE();
> @@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
>  		return ret;
>  	}
> 
> +	ixgbe_dev_info_get(dev, &dev_info);
> +	rx_offloads = dev->data->dev_conf.rxmode.offloads;
> +	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
> +		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
> +			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +			    rx_offloads, dev_info.rx_offload_capa);
> +		return -ENOTSUP;
> +	}
> +
>  	/* set flag to update link status after init */
>  	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
> 
> @@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  	else
>  		dev_info->max_vmdq_pools = ETH_64_POOLS;
>  	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
> -	dev_info->rx_offload_capa =
> -		DEV_RX_OFFLOAD_VLAN_STRIP |
> -		DEV_RX_OFFLOAD_IPV4_CKSUM |
> -		DEV_RX_OFFLOAD_UDP_CKSUM  |
> -		DEV_RX_OFFLOAD_TCP_CKSUM  |
> -		DEV_RX_OFFLOAD_CRC_STRIP;
> -
> -	/*
> -	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
> -	 * mode.
> -	 */
> -	if ((hw->mac.type == ixgbe_mac_82599EB ||
> -	     hw->mac.type == ixgbe_mac_X540) &&
> -	    !RTE_ETH_DEV_SRIOV(dev).active)
> -		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
> -
> -	if (hw->mac.type == ixgbe_mac_82599EB ||
> -	    hw->mac.type == ixgbe_mac_X540)
> -		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
> -
> -	if (hw->mac.type == ixgbe_mac_X550 ||
> -	    hw->mac.type == ixgbe_mac_X550EM_x ||
> -	    hw->mac.type == ixgbe_mac_X550EM_a)
> -		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
> +	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
> +	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
> +				     dev_info->rx_queue_offload_capa);
> 
>  	dev_info->tx_offload_capa =
>  		DEV_TX_OFFLOAD_VLAN_INSERT |
> @@ -3675,10 +3668,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
> 
>  #ifdef RTE_LIBRTE_SECURITY
> -	if (dev->security_ctx) {
> -		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
> +	if (dev->security_ctx)
>  		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
> -	}
>  #endif
> 
>  	dev_info->default_rxconf = (struct rte_eth_rxconf) {
> @@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  		},
>  		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
>  		.rx_drop_en = 0,
> +		.offloads = 0,
>  	};
> 
>  	dev_info->default_txconf = (struct rte_eth_txconf) {
> @@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
>  		dev_info->max_vmdq_pools = ETH_16_POOLS;
>  	else
>  		dev_info->max_vmdq_pools = ETH_64_POOLS;
> -	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
> -				DEV_RX_OFFLOAD_IPV4_CKSUM |
> -				DEV_RX_OFFLOAD_UDP_CKSUM  |
> -				DEV_RX_OFFLOAD_TCP_CKSUM  |
> -				DEV_RX_OFFLOAD_CRC_STRIP;
> +	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
> +	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
> +				     dev_info->rx_queue_offload_capa);
>  	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
>  				DEV_TX_OFFLOAD_IPV4_CKSUM  |
>  				DEV_TX_OFFLOAD_UDP_CKSUM   |
> @@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
>  		},
>  		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
>  		.rx_drop_en = 0,
> +		.offloads = 0,
>  	};
> 
>  	dev_info->default_txconf = (struct rte_eth_txconf) {
> @@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
> 
>  	/* switch to jumbo mode if needed */
>  	if (frame_size > ETHER_MAX_LEN) {
> -		dev->data->dev_conf.rxmode.jumbo_frame = 1;
> +		dev->data->dev_conf.rxmode.offloads |=
> +			DEV_RX_OFFLOAD_JUMBO_FRAME;
>  		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
>  	} else {
> -		dev->data->dev_conf.rxmode.jumbo_frame = 0;
> +		dev->data->dev_conf.rxmode.offloads &=
> +			~DEV_RX_OFFLOAD_JUMBO_FRAME;
>  		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
>  	}
>  	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
> @@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
>  	struct rte_eth_conf *conf = &dev->data->dev_conf;
>  	struct ixgbe_adapter *adapter =
>  			(struct ixgbe_adapter *)dev->data->dev_private;
> +	struct rte_eth_dev_info dev_info;
> +	uint64_t rx_offloads;
> 
>  	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
>  		     dev->data->port_id);
> 
> +	ixgbevf_dev_info_get(dev, &dev_info);
> +	rx_offloads = dev->data->dev_conf.rxmode.offloads;
> +	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
> +		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
> +			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +			    rx_offloads, dev_info.rx_offload_capa);
> +		return -ENOTSUP;
> +	}
> +
>  	/*
>  	 * VF has no ability to enable/disable HW CRC
>  	 * Keep the persistent behavior the same as Host PF
>  	 */
>  #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
> -	if (!conf->rxmode.hw_strip_crc) {
> +	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
>  		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
> -		conf->rxmode.hw_strip_crc = 1;
> +		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
>  	}
>  #else
> -	if (conf->rxmode.hw_strip_crc) {
> +	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
>  		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
> -		conf->rxmode.hw_strip_crc = 0;
> +		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
>  	}
>  #endif
> 
> @@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
>  			   uint16_t queue_idx, uint16_t tx_rate)
>  {
>  	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct rte_eth_rxmode *rxmode;
>  	uint32_t rf_dec, rf_int;
>  	uint32_t bcnrc_val;
>  	uint16_t link_speed = dev->data->dev_link.link_speed;
> @@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
>  		bcnrc_val = 0;
>  	}
> 
> +	rxmode = &dev->data->dev_conf.rxmode;
>  	/*
>  	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
>  	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
>  	 * set as 0x4.
>  	 */
> -	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
> -		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
> -				IXGBE_MAX_JUMBO_FRAME_SIZE))
> +	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
> +	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
>  		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
>  			IXGBE_MMW_SIZE_JUMBO_FRAME);
>  	else
> @@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
>  	/* refuse mtu that requires the support of scattered packets when this
>  	 * feature has not been enabled before.
>  	 */
> -	if (!rx_conf->enable_scatter &&
> +	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
>  	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
>  	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
>  		return -EINVAL;
> diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
> index 176ec0f..29e4728 100644
> --- a/drivers/net/ixgbe/ixgbe_ipsec.c
> +++ b/drivers/net/ixgbe/ixgbe_ipsec.c
> @@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
>  {
>  	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>  	uint32_t reg;
> +	uint64_t rx_offloads;
> 
> +	rx_offloads = dev->data->dev_conf.rxmode.offloads;
>  	/* sanity checks */
> -	if (dev->data->dev_conf.rxmode.enable_lro) {
> +	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
>  		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
>  		return -1;
>  	}
> -	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
> +	if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
>  		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
>  		return -1;
>  	}
> @@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
>  	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
>  	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
> 
> -	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
> +	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
>  		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
>  		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
>  		if (reg != 0) {
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
> index 5c45eb4..a5d4822 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
> @@ -2769,6 +2769,98 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
>  #endif
>  }
> 
> +static int
> +ixgbe_is_vf(struct rte_eth_dev *dev)
> +{
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> +	switch (hw->mac.type) {
> +	case ixgbe_mac_82599_vf:
> +	case ixgbe_mac_X540_vf:
> +	case ixgbe_mac_X550_vf:
> +	case ixgbe_mac_X550EM_x_vf:
> +	case ixgbe_mac_X550EM_a_vf:
> +		return 1;
> +	default:
> +		return 0;
> +	}
> +}
> +
> +uint64_t
> +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
> +{
> +	uint64_t offloads;
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> +	offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;

As I can see, in ixgbe all header_split code is enabled only if RTE_HEADER_SPLIT_ENABLE is on.
It is off by default and I doubt anyone is really using it these days.
So I think the best thing would be not to advertise DEV_RX_OFFLOAD_HEADER_SPLIT for ixgbe at all,
and probably remove the related code.
If you'd prefer to keep it, then at least we should set that capability only
under #ifdef RTE_HEADER_SPLIT_ENABLE.
Another thing - it should be per port, not per queue.
Though I think it is better just to remove it completely.
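
For illustration, assuming the capability is kept rather than removed, a
minimal sketch of how ixgbe_get_rx_queue_offloads() could gate it (the v3
changelog later in the thread indicates the series takes this route):

uint64_t
ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t offloads = 0;

#ifdef RTE_HEADER_SPLIT_ENABLE
	/* advertise header split only when the split code is compiled in */
	offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
#endif
	if (hw->mac.type != ixgbe_mac_82598EB)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;

	return offloads;
}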

> +	if (hw->mac.type != ixgbe_mac_82598EB)
> +		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> +
> +	return offloads;
> +}
> +
> +uint64_t
> +ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
> +{
> +	uint64_t offloads;
> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> +	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
> +		   DEV_RX_OFFLOAD_UDP_CKSUM   |
> +		   DEV_RX_OFFLOAD_TCP_CKSUM   |
> +		   DEV_RX_OFFLOAD_CRC_STRIP   |
> +		   DEV_RX_OFFLOAD_JUMBO_FRAME |
> +		   DEV_RX_OFFLOAD_SCATTER;
> +
> +	if (hw->mac.type == ixgbe_mac_82598EB)
> +		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> +
> +	if (ixgbe_is_vf(dev) == 0)
> +		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
> +			     DEV_RX_OFFLOAD_VLAN_EXTEND);
> +
> +	/*
> +	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
> +	 * mode.
> +	 */
> +	if ((hw->mac.type == ixgbe_mac_82599EB ||
> +	     hw->mac.type == ixgbe_mac_X540) &&
> +	    !RTE_ETH_DEV_SRIOV(dev).active)
> +		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
> +
> +	if (hw->mac.type == ixgbe_mac_82599EB ||
> +	    hw->mac.type == ixgbe_mac_X540)
> +		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
> +
> +	if (hw->mac.type == ixgbe_mac_X550 ||
> +	    hw->mac.type == ixgbe_mac_X550EM_x ||
> +	    hw->mac.type == ixgbe_mac_X550EM_a)
> +		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
> +
> +#ifdef RTE_LIBRTE_SECURITY

I don't think you need that ifdef here.

> +	if (dev->security_ctx)
> +		offloads |= DEV_RX_OFFLOAD_SECURITY;
> +#endif
> +
> +	return offloads;
> +}
> +
> +static int
> +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
> +{
> +	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> +	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> +	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> +
> +	if ((requested & (queue_supported | port_supported)) != requested)
> +		return 0;
> +
> +	if ((port_offloads ^ requested) & port_supported)

Could you explain a bit more what you are checking here?
As I can see:
 (port_offloads ^ requested) - that's a diff between the already set and newly
requested offloads.
Then you check if that diff includes any offloads supported by the port,
and if yes you return an error?
Konstantin

> +		return 0;
> +
> +	return 1;
> +}
> +

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx offloads API
  2018-02-27 16:01 ` [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx " Wei Dai
@ 2018-03-14 23:18   ` Ananyev, Konstantin
  2018-03-19  6:24     ` Dai, Wei
  0 siblings, 1 reply; 28+ messages in thread
From: Ananyev, Konstantin @ 2018-03-14 23:18 UTC (permalink / raw)
  To: Dai, Wei, Lu, Wenzhuo; +Cc: dev


> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
> index 30095fa..d7f0535 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> @@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
>  	uint8_t             hthresh;       /**< Host threshold register. */
>  	uint8_t             wthresh;       /**< Write-back threshold reg. */
>  	uint32_t txq_flags; /**< Holds flags for this TXq */
> +	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
>  	uint32_t            ctx_curr;      /**< Hardware context states. */
>  	/** Hardware context0 history. */
>  	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
> @@ -254,6 +255,12 @@ struct ixgbe_txq_ops {
>  #define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
>  			    ETH_TXQ_FLAGS_NOOFFLOADS)
> 
> +#define IXGBE_SIMPLE_TX_OFFLOAD_FLAGS ((uint64_t)DEV_TX_OFFLOAD_MULTI_SEGS |\
> +					DEV_TX_OFFLOAD_VLAN_INSERT |\
> +					DEV_TX_OFFLOAD_SCTP_CKSUM |\
> +					DEV_TX_OFFLOAD_UDP_CKSUM |\
> +					DEV_TX_OFFLOAD_TCP_CKSUM)


Hmm, and why are IP_CKSUM, TSO, OUTER_IP_CKSUM, etc. not included in that macro?
In fact, do you really need it?
As I understand, right now vector TX doesn't support any offloads, so tx_offload != 0
should be enough for tx function selection, right?
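
To make that concrete, the simple-path condition in ixgbe_set_tx_function()
could then collapse to something like the sketch below
(txq_can_use_simple_path is just a made-up helper name for illustration):

static inline int
txq_can_use_simple_path(const struct ixgbe_tx_queue *txq)
{
	/* the simple/vector Tx path supports no Tx offloads at all */
	return txq->offloads == 0 &&
	       txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST;
}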
Konstantin

> +
>  /*
>   * Populate descriptors with the following info:
>   * 1.) buffer_addr = phys_addr + headroom
> @@ -307,6 +314,7 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
>  				    uint16_t nb_pkts);
>  int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
> 
> +uint64_t ixgbe_get_tx_port_offlaods(struct rte_eth_dev *dev);
>  uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
>  uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
> 
> --
> 2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
  2018-03-14 21:47     ` Ananyev, Konstantin
@ 2018-03-19  3:15       ` Dai, Wei
  2018-03-20 11:53         ` Ananyev, Konstantin
  0 siblings, 1 reply; 28+ messages in thread
From: Dai, Wei @ 2018-03-19  3:15 UTC (permalink / raw)
  To: Ananyev, Konstantin, Lu, Wenzhuo; +Cc: dev

Hi, Konstantin
Thanks for your feedback.

> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Thursday, March 15, 2018 5:48 AM
> To: Dai, Wei <wei.dai@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> 
> Hi Wei,
> 
> > -----Original Message-----
> > From: Dai, Wei
> > Sent: Wednesday, March 7, 2018 1:06 PM
> > To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin
> > <konstantin.ananyev@intel.com>
> > Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> > Subject: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> >
> > Ethdev Rx offloads API has changed since:
> > commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") This
> > commit support the new Rx offloads API.
> >
> > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > ---
> >  drivers/net/ixgbe/ixgbe_ethdev.c          |  93 +++++++++--------
> >  drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
> >  drivers/net/ixgbe/ixgbe_rxtx.c            | 163
> ++++++++++++++++++++++++++----
> >  drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
> >  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
> >  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
> >  6 files changed, 205 insertions(+), 66 deletions(-)
> >
> > +uint64_t
> > +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev) {
> > +	uint64_t offloads;
> > +	struct ixgbe_hw *hw =
> > +IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +
> > +	offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;
> 
> As I can see, in ixgbe all header_split code is enabled only if
> RTE_HEADER_SPLIT_ENABLE is on.
> It is off by default and I doubt anyone is really using it these days.
> So I think the best thing would be not to advertise
> DEV_RX_OFFLOAD_HEADER_SPLIT for ixgbe at all, and probably remove the
> related code.
> If you'd prefer to keep it, then at least we should set that capability only
> under #ifdef RTE_HEADER_SPLIT_ENABLE.
> Another thing - it should be per port, not per queue.
> Though I think it is better just to remove it completely.
I will set this header splitting capability under #ifdef RTE_HEADER_SPLIT_ENABLE in my next patch set.
I think it is a per queue capability as it can be configured in the IXGBE_SRRCTL register of every Rx queue
in this code line: IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); in ixgbe_dev_rx_init().
The same applies to the code line: IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl); in ixgbevf_dev_rx_init().

> > +static int
> > +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> > +requested) {
> > +	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> > +	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> > +	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> > +
> > +	if ((requested & (queue_supported | port_supported)) != requested)
> > +		return 0;
> > +
> > +	if ((port_offloads ^ requested) & port_supported)
> 
> Could you explain a bit more what you are checking here?
> As I can see:
>  (port_offloads ^ requested) - that's a diff between the already set and newly
> requested offloads.
> Then you check if that diff includes any offloads supported by the port, and if
> yes you return an error?
> Konstantin
> 
This function is similar to mlx4_check_rx_queue_offloads() in the mlx4 driver.
The git log message of commit ce17eddefc20285bbfe575bdc07f42f0b20f34cb says
that a per port capability must have the same setting (enabled or disabled) in both the port
configuration via rte_eth_dev_configure() and the queue configuration via rte_eth_rx_queue_setup().
This function checks whether that requirement is met.
It also checks whether each requested offload is supported as a per port or a per queue capability at all.
If both checks pass, it returns 1; otherwise it returns 0.
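
To make the two checks concrete, below is a small standalone illustration
with made-up flag values (not the real DEV_RX_OFFLOAD_* bits):

#include <stdint.h>
#include <stdio.h>

#define VLAN_STRIP   (1ULL << 0)	/* per queue capability */
#define VLAN_FILTER  (1ULL << 1)	/* per port capability  */
#define JUMBO_FRAME  (1ULL << 2)	/* per port capability  */

static int
check_rx_queue_offloads(uint64_t requested, uint64_t port_offloads,
			uint64_t queue_supported, uint64_t port_supported)
{
	/* every requested bit must be known at queue or port level */
	if ((requested & (queue_supported | port_supported)) != requested)
		return 0;
	/* per port bits must not differ from what dev_configure() set */
	if ((port_offloads ^ requested) & port_supported)
		return 0;
	return 1;
}

int
main(void)
{
	uint64_t queue_supported = VLAN_STRIP;
	uint64_t port_supported = VLAN_FILTER | JUMBO_FRAME;
	uint64_t port_offloads = VLAN_FILTER;	/* set at configure time */

	/* per port part matches the port config: accepted, prints 1 */
	printf("%d\n", check_rx_queue_offloads(VLAN_STRIP | VLAN_FILTER,
		port_offloads, queue_supported, port_supported));
	/* JUMBO_FRAME was not enabled port wide: rejected, prints 0 */
	printf("%d\n", check_rx_queue_offloads(VLAN_STRIP | JUMBO_FRAME,
		port_offloads, queue_supported, port_supported));
	return 0;
}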

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx offloads API
  2018-03-14 23:18   ` Ananyev, Konstantin
@ 2018-03-19  6:24     ` Dai, Wei
  0 siblings, 0 replies; 28+ messages in thread
From: Dai, Wei @ 2018-03-19  6:24 UTC (permalink / raw)
  To: Ananyev, Konstantin, Lu, Wenzhuo; +Cc: dev

Hi, Konstantin
Thanks for your feedback.

> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Thursday, March 15, 2018 7:19 AM
> To: Dai, Wei <wei.dai@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH 4/4] net/ixgbe: convert to new Tx offloads API
> 
> 
> > diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h
> > b/drivers/net/ixgbe/ixgbe_rxtx.h index 30095fa..d7f0535 100644
> > --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> > +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> > @@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
> >  	uint8_t             hthresh;       /**< Host threshold register. */
> >  	uint8_t             wthresh;       /**< Write-back threshold reg.
> */
> >  	uint32_t txq_flags; /**< Holds flags for this TXq */
> > +	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
> >  	uint32_t            ctx_curr;      /**< Hardware context states.
> */
> >  	/** Hardware context0 history. */
> >  	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM]; @@ -254,6
> +255,12
> > @@ struct ixgbe_txq_ops {  #define IXGBE_SIMPLE_FLAGS
> > ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
> >  			    ETH_TXQ_FLAGS_NOOFFLOADS)
> >
> > +#define IXGBE_SIMPLE_TX_OFFLOAD_FLAGS
> ((uint64_t)DEV_TX_OFFLOAD_MULTI_SEGS |\
> > +					DEV_TX_OFFLOAD_VLAN_INSERT |\
> > +					DEV_TX_OFFLOAD_SCTP_CKSUM |\
> > +					DEV_TX_OFFLOAD_UDP_CKSUM |\
> > +					DEV_TX_OFFLOAD_TCP_CKSUM)
> 
> 
> Hmm, and why are IP_CKSUM, TSO, OUTER_IP_CKSUM, etc. not included in
> that macro?
> In fact, do you really need it?
> As I understand, right now vector TX doesn't support any offloads, so
> tx_offload != 0 should be enough for tx function selection, right?
> Konstantin
In this patch, IXGBE_SIMPLE_TX_OFFLOAD_FLAGS is just a reverse transform of
IXGBE_SIMPLE_FLAGS, which is used in the old offload API.
Yes, the current vector Tx doesn't support any offloads.
I will use tx_offload==0 following your guidance.
> 
> > +
> >  /*
> >   * Populate descriptors with the following info:
> >   * 1.) buffer_addr = phys_addr + headroom @@ -307,6 +314,7 @@
> > uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf
> **tx_pkts,
> >  				    uint16_t nb_pkts);
> >  int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
> >
> > +uint64_t ixgbe_get_tx_port_offlaods(struct rte_eth_dev *dev);
> >  uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
> > uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
> >
> > --
> > 2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new offloads API
  2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
                     ` (3 preceding siblings ...)
  2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 4/4] net/ixgbe: convert to new Tx " Wei Dai
@ 2018-03-19  7:04   ` Wei Dai
  2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
                       ` (4 more replies)
  4 siblings, 5 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-19  7:04 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

This patch set adds support of per queue VLAN strip offloading
in ixgbe PF and VF.
This patch set supports the new offloads API in ixgbe PF and VF.

---
v3: Rx header splitting capability is only enabled under
    #ifdef RTE_HEADER_SPLIT_ENABLE.
    Tx vector transmit function only works without any Tx offloads.
   
v2: improve error checking

Wei Dai (4):
  net/ixgbe: support VLAN strip per queue offloading in PF
  net/ixgbe: support VLAN strip per queue offloading in VF
  net/ixgbe: convert to new Rx offloads API
  net/ixgbe: convert to new Tx offloads API

 drivers/net/ixgbe/ixgbe_ethdev.c          | 264 ++++++++++++++----------------
 drivers/net/ixgbe/ixgbe_ethdev.h          |   4 +-
 drivers/net/ixgbe/ixgbe_ipsec.c           |  13 +-
 drivers/net/ixgbe/ixgbe_pf.c              |   5 +-
 drivers/net/ixgbe/ixgbe_rxtx.c            | 245 ++++++++++++++++++++++++---
 drivers/net/ixgbe/ixgbe_rxtx.h            |   7 +
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
 8 files changed, 370 insertions(+), 172 deletions(-)

-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v3 1/4] net/ixgbe: support VLAN strip per queue offloading in PF
  2018-03-19  7:04   ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
@ 2018-03-19  7:04     ` Wei Dai
  2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
                       ` (3 subsequent siblings)
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-19  7:04 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

VLAN stripping is a per-queue offload in the PF. With this patch
it can be enabled or disabled on any Rx queue in the PF.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 109 +++++++++++++++++----------------------
 drivers/net/ixgbe/ixgbe_ethdev.h |   4 +-
 drivers/net/ixgbe/ixgbe_pf.c     |   5 +-
 drivers/net/ixgbe/ixgbe_rxtx.c   |   1 +
 drivers/net/ixgbe/ixgbe_rxtx.h   |   1 +
 5 files changed, 51 insertions(+), 69 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4483258..73755d2 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2001,64 +2001,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
 }
 
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-	struct ixgbe_rx_queue *rxq;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl &= ~IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	} else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			ctrl &= ~IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-			/* record those setting for HW strip per queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
-		}
-	}
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-	struct ixgbe_rx_queue *rxq;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl |= IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	} else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-			/* record those setting for HW strip per queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
-		}
-	}
-}
-
 static void
 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 {
@@ -2114,14 +2056,57 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	 */
 }
 
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	uint32_t ctrl;
+	uint16_t i;
+	struct ixgbe_rx_queue *rxq;
+	bool on;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			ctrl |= IXGBE_VLNCTRL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+		} else {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			ctrl &= ~IXGBE_VLNCTRL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+		}
+	} else {
+		/*
+		 * Other 10G NIC, the VLAN strip can be setup
+		 * per queue in RXDCTL
+		 */
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+				ctrl |= IXGBE_RXDCTL_VME;
+				on = TRUE;
+			} else {
+				ctrl &= ~IXGBE_RXDCTL_VME;
+				on = FALSE;
+			}
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+			/* record those setting for HW strip per queue */
+			ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+		}
+	}
+}
+
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-			ixgbe_vlan_hw_strip_enable_all(dev);
-		else
-			ixgbe_vlan_hw_strip_disable_all(dev);
+		ixgbe_vlan_hw_strip_config(dev);
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c56d652..6550777 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -659,9 +659,7 @@ void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
 
 void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
 
-void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
+void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
 
 void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
 
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index ea99737..4e61310 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -329,10 +329,7 @@ set_rx_mode(struct rte_eth_dev *dev)
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-		ixgbe_vlan_hw_strip_enable_all(dev);
-	else
-		ixgbe_vlan_hw_strip_disable_all(dev);
+	ixgbe_vlan_hw_strip_config(dev);
 }
 
 static inline void
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 6c582b4..5c45eb4 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2820,6 +2820,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 							0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = rx_conf->offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 69c718b..ab5f01e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -129,6 +129,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
+	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v3 2/4] net/ixgbe: support VLAN strip per queue offloading in VF
  2018-03-19  7:04   ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
  2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
@ 2018-03-19  7:04     ` Wei Dai
  2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
                       ` (2 subsequent siblings)
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-19  7:04 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

VLAN stripping is a per-queue offload in the VF. With this patch
it can be enabled or disabled on any Rx queue in the VF.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 73755d2..8bb67ba 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5215,15 +5215,17 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_rx_queue *rxq;
 	uint16_t i;
 	int on = 0;
 
 	/* VF function only support hw strip feature, others are not support */
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
-		for (i = 0; i < hw->mac.max_rx_queues; i++)
+		for (i = 0; i < hw->mac.max_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
+		}
 	}
 
 	return 0;
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v3 3/4] net/ixgbe: convert to new Rx offloads API
  2018-03-19  7:04   ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
  2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
  2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
@ 2018-03-19  7:04     ` Wei Dai
  2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 4/4] net/ixgbe: convert to new Tx " Wei Dai
  2018-03-22  3:40     ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-19  7:04 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

The ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c          |  93 +++++++++--------
 drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
 drivers/net/ixgbe/ixgbe_rxtx.c            | 165 +++++++++++++++++++++++++++---
 drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
 6 files changed, 207 insertions(+), 66 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 8bb67ba..9437f05 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		ixgbe_vlan_hw_strip_config(dev);
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	ixgbe_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_CRC_STRIP;
-
-	/*
-	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
-	 * mode.
-	 */
-	if ((hw->mac.type == ixgbe_mac_82599EB ||
-	     hw->mac.type == ixgbe_mac_X540) &&
-	    !RTE_ETH_DEV_SRIOV(dev).active)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -3675,10 +3668,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	if (dev->security_ctx)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	}
 #endif
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		dev_info->max_vmdq_pools = ETH_16_POOLS;
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM  |
-				DEV_RX_OFFLOAD_CRC_STRIP;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM  |
 				DEV_TX_OFFLOAD_UDP_CKSUM   |
@@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
+	ixgbevf_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif
 
@@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 			   uint16_t queue_idx, uint16_t tx_rate)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rf_dec, rf_int;
 	uint32_t bcnrc_val;
 	uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 		bcnrc_val = 0;
 	}
 
+	rxmode = &dev->data->dev_conf.rxmode;
 	/*
 	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
 	 * set as 0x4.
 	 */
-	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
-				IXGBE_MAX_JUMBO_FRAME_SIZE))
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
 			IXGBE_MMW_SIZE_JUMBO_FRAME);
 	else
@@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* refuse mtu that requires the support of scattered packets when this
 	 * feature has not been enabled before.
 	 */
-	if (!rx_conf->enable_scatter &&
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
 	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
 		return -EINVAL;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0f..29e4728 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
+	uint64_t rx_offloads;
 
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	/* sanity checks */
-	if (dev->data->dev_conf.rxmode.enable_lro) {
+	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+	if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5c45eb4..5e8635b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2769,6 +2769,100 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 #endif
 }
 
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599_vf:
+	case ixgbe_mac_X540_vf:
+	case ixgbe_mac_X550_vf:
+	case ixgbe_mac_X550EM_x_vf:
+	case ixgbe_mac_X550EM_a_vf:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads = 0;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+#ifdef RTE_HEADER_SPLIT_ENABLE
+	offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
+#endif
+	if (hw->mac.type != ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		   DEV_RX_OFFLOAD_UDP_CKSUM   |
+		   DEV_RX_OFFLOAD_TCP_CKSUM   |
+		   DEV_RX_OFFLOAD_CRC_STRIP   |
+		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+		   DEV_RX_OFFLOAD_SCATTER;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	if (ixgbe_is_vf(dev) == 0)
+		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+	/*
+	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+	 * mode.
+	 */
+	if ((hw->mac.type == ixgbe_mac_82599EB ||
+	     hw->mac.type == ixgbe_mac_X540) &&
+	    !RTE_ETH_DEV_SRIOV(dev).active)
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+	return offloads;
+}
+
+static int
+ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
+	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2787,6 +2881,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			ixgbe_get_rx_port_offloads(dev),
+			ixgbe_get_rx_queue_offloads(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -2816,8 +2922,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 	rxq->offloads = rx_conf->offloads;
@@ -4575,7 +4681,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && rx_conf->enable_lro) {
+	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4583,7 +4689,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4597,7 +4704,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration  */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->enable_lro))
+	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		/*
 		 * Since NFS packets coalescing is not supported - clear
 		 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4610,7 +4717,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!rx_conf->enable_lro)
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -4730,7 +4837,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->hw_strip_crc)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -4738,7 +4845,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->jumbo_frame == 1) {
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 		maxfrs &= 0x0000FFFF;
@@ -4758,6 +4865,12 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rx_conf->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+			       DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -4766,7 +4879,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+		rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+				0 : ETHER_CRC_LEN;
 
 		/* Setup the Base and Length of the Rx Descriptor Rings */
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -4784,7 +4898,9 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure Header Split
 		 */
-		if (rx_conf->header_split) {
+		if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+			/* add Header Split flag for set_rx_function() */
+			rx_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
 			if (hw->mac.type == ixgbe_mac_82599EB) {
 				/* Must setup the PSRTYPE register */
 				uint32_t psrtype;
@@ -4827,9 +4943,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->enable_scatter)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4844,7 +4962,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->hw_ip_checksum)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4854,7 +4972,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->hw_strip_crc)
+		if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -5260,6 +5378,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -5290,6 +5409,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
 	struct ixgbe_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint64_t bus_addr;
 	uint32_t srrctl, psrtype = 0;
 	uint16_t buf_size;
@@ -5329,6 +5449,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	ixgbevf_rlpml_set_vf(hw,
 		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rxmode->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+			      DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5356,7 +5482,9 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure Header Split
 		 */
-		if (dev->data->dev_conf.rxmode.header_split) {
+		if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+			/* add Header Split flag for set_rx_function() */
+			rxmode->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
 			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
 				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 				IXGBE_SRRCTL_BSIZEHDR_MASK);
@@ -5388,18 +5516,21 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (dev->data->dev_conf.rxmode.enable_scatter ||
+		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
-		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+		    (rxmode->max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
 		}
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}
 
 #ifdef RTE_HEADER_SPLIT_ENABLE
-	if (dev->data->dev_conf.rxmode.header_split)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
 		/* Must setup the PSRTYPE register */
 		psrtype = IXGBE_PSRTYPE_TCPHDR |
 			IXGBE_PSRTYPE_UDPHDR   |
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ab5f01e..30095fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -307,5 +307,8 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+
 #endif /* RTE_IXGBE_INC_VECTOR */
 #endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a..d3eb060 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -286,7 +286,7 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;
 
 	/* no header split support */
-	if (rxmode->header_split == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;
 
 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998..edb1383 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
 	/* no csum error report support */
-	if (rxmode->hw_ip_checksum == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		return -1;
 
 	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v3 4/4] net/ixgbe: convert to new Tx offloads API
  2018-03-19  7:04   ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
                       ` (2 preceding siblings ...)
  2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
@ 2018-03-19  7:04     ` Wei Dai
  2018-03-22  3:40     ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-19  7:04 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

The ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 56 +++++++++++++---------------
 drivers/net/ixgbe/ixgbe_ipsec.c  |  5 ++-
 drivers/net/ixgbe/ixgbe_rxtx.c   | 79 ++++++++++++++++++++++++++++++++++++++--
 drivers/net/ixgbe/ixgbe_rxtx.h   |  3 ++
 4 files changed, 108 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 9437f05..6288690 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2337,6 +2337,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2356,6 +2357,13 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3649,28 +3657,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-#endif
+	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3692,7 +3680,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -3776,12 +3766,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3803,7 +3789,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -4941,6 +4929,7 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 			(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
@@ -4953,6 +4942,13 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 29e4728..de7ed36 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -599,8 +599,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+
 	/* sanity checks */
 	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
@@ -634,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5e8635b..f8346c0 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+	if ((txq->offloads == 0) &&
 #ifdef RTE_LIBRTE_SECURITY
 			!(txq->using_ipsec) &&
 #endif
@@ -2398,9 +2398,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	} else {
 		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
 		PMD_INIT_LOG(DEBUG,
-				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
-				(unsigned long)txq->txq_flags,
-				(unsigned long)IXGBE_SIMPLE_FLAGS);
+				" - offloads = 0x%" PRIx64,
+				txq->offloads);
 		PMD_INIT_LOG(DEBUG,
 				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
 				(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2409,60 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	}
 }
 
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM   |
+		DEV_TX_OFFLOAD_SCTP_CKSUM  |
+		DEV_TX_OFFLOAD_TCP_TSO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+	return tx_offload_capa;
+}
+
+static int
+ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
+	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2426,6 +2479,22 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
+	 * Don't verify port offloads for applications which
+	 * use the old API.
+	 */
+	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64,
+			(void *)dev, tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			ixgbe_get_tx_queue_offloads(dev),
+			ixgbe_get_tx_port_offloads(dev));
+		return -ENOTSUP;
+	}
+
+	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
@@ -2551,6 +2620,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
+	txq->offloads = tx_conf->offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -5398,6 +5468,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
 	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.offloads = txq->offloads;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 30095fa..642cf4d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
 	uint32_t txq_flags; /**< Holds flags for this TXq */
+	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -307,8 +308,10 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
 
 #endif /* RTE_IXGBE_INC_VECTOR */
 #endif /* _IXGBE_RXTX_H_ */
-- 
2.7.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
  2018-03-19  3:15       ` Dai, Wei
@ 2018-03-20 11:53         ` Ananyev, Konstantin
  2018-03-21 14:03           ` Dai, Wei
  0 siblings, 1 reply; 28+ messages in thread
From: Ananyev, Konstantin @ 2018-03-20 11:53 UTC (permalink / raw)
  To: Dai, Wei, Lu, Wenzhuo; +Cc: dev


Hi Wei,

> 
> Hi, Konstantin
> Thanks for your feedback.
> 
> > -----Original Message-----
> > From: Ananyev, Konstantin
> > Sent: Thursday, March 15, 2018 5:48 AM
> > To: Dai, Wei <wei.dai@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> > Cc: dev@dpdk.org
> > Subject: RE: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> >
> > Hi Wei,
> >
> > > -----Original Message-----
> > > From: Dai, Wei
> > > Sent: Wednesday, March 7, 2018 1:06 PM
> > > To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin
> > > <konstantin.ananyev@intel.com>
> > > Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> > > Subject: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> > >
> > > Ethdev Rx offloads API has changed since:
> > > commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") This
> > > commit support the new Rx offloads API.
> > >
> > > Signed-off-by: Wei Dai <wei.dai@intel.com>
> > > ---
> > >  drivers/net/ixgbe/ixgbe_ethdev.c          |  93 +++++++++--------
> > >  drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
> > >  drivers/net/ixgbe/ixgbe_rxtx.c            | 163
> > ++++++++++++++++++++++++++----
> > >  drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
> > >  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
> > >  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
> > >  6 files changed, 205 insertions(+), 66 deletions(-)
> > >
> > > +uint64_t
> > > +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev) {
> > > +	uint64_t offloads;
> > > +	struct ixgbe_hw *hw =
> > > +IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > > +
> > > +	offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;
> >
> > As I can see I ixgbe all header_split code is enabled only if
> > RTE_HEADER_SPLIT_ENABLE is on.
> > It is off by default and I doubt anyone really using it these days.
> > So I think the best thing would be not to advertise
> > DEV_RX_OFFLOAD_HEADER_SPLIT for ixgbe at all, and probably remove
> > related code.
> > If you'd prefer to keep it, then at least we should set that capability only at
> > #ifdef RTE_HEADER_SPLIT_ENABLE.
> > Another thing - 	it should be per port, not per queue.
> > Thought I think better is just to remove it completely.
> I will set this header splitting capability under #ifdef RTE_HEADER_SPLIT_ENABLE in my next patch set.
> I think it is a per-queue capability as it can be configured via the IXGBE_SRRCTL register of every Rx queue
> in this code line: IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); in ixgbe_dev_rx_init().
> The same applies to the code line: IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl); in ixgbevf_dev_rx_init().

Yes, HW can enable/disable it on a per-queue basis.
Though it affects Rx function selection, and right now we have one Rx function per device -
that's why it looks to me more like a per-port offload.
Though I believe these days the ixgbe PMD doesn't support it properly anyway
(we always set rxd.hdr_addr to zero) - so it is probably better to remove it altogether.

> 
> > > +static int
> > > +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> > > +requested) {
> > > +	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> > > +	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> > > +	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> > > +
> > > +	if ((requested & (queue_supported | port_supported)) != requested)
> > > +		return 0;
> > > +
> > > +	if ((port_offloads ^ requested) & port_supported)
> >
> > Could you explain a bit more what are you cheking here?
> > As I can see:
> >  (port_offloads ^ requested) - that's a diff between already set and newly
> > requested offloads.
> > Then you check if that diff consists of supported by port offloads, and if yes
> > you return an error?
> > Konstantin
> >
> This function is similar to mlx4_check_rx_queue_offloads() in the mlx4 driver.
> As the git log message of commit ce17eddefc20285bbfe575bdc07f42f0b20f34cb says,
> a per-port capability should have the same setting (enabled or disabled) in both the port
> configuration via rte_eth_dev_configure() and the queue configuration via rte_eth_rx_queue_setup().
> This function checks whether this requirement is met.
> It also checks whether the requested offload is supported as a per-port or a per-queue capability.
> If both checks pass, it returns 1; otherwise it returns 0.

OK, let me be more specific here.
Let's say:
requested == DEV_RX_OFFLOAD_VLAN_STRIP;
port_offloads == DEV_RX_OFFLOAD_IPV4_CKSUM;
port_supported = (DEV_RX_OFFLOAD_IPV4_CKSUM  |
		   DEV_RX_OFFLOAD_UDP_CKSUM   |
		   DEV_RX_OFFLOAD_TCP_CKSUM   |
		   DEV_RX_OFFLOAD_CRC_STRIP   |
		   DEV_RX_OFFLOAD_JUMBO_FRAME |
		   DEV_RX_OFFLOAD_SCATTER);

(port_offloads ^ requested) == DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM;
(port_offloads ^ requested) & port_supported == DEV_RX_OFFLOAD_IPV4_CKSUM;
And that function will return failure, while as I understand it shouldn't - the requested queue offload is valid.
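
A quick stand-alone check confirms the arithmetic (a throwaway demo
program I am sketching here, not driver code - only the
DEV_RX_OFFLOAD_* flags from rte_ethdev.h are real):

#include <inttypes.h>
#include <stdio.h>

#include <rte_ethdev.h>

int
main(void)
{
	uint64_t requested = DEV_RX_OFFLOAD_VLAN_STRIP;
	uint64_t port_offloads = DEV_RX_OFFLOAD_IPV4_CKSUM;
	uint64_t port_supported = DEV_RX_OFFLOAD_IPV4_CKSUM  |
				  DEV_RX_OFFLOAD_UDP_CKSUM   |
				  DEV_RX_OFFLOAD_TCP_CKSUM   |
				  DEV_RX_OFFLOAD_CRC_STRIP   |
				  DEV_RX_OFFLOAD_JUMBO_FRAME |
				  DEV_RX_OFFLOAD_SCATTER;

	/* IPV4_CKSUM survives the mask, so the check would reject
	 * this otherwise valid per-queue request. */
	printf("diff & port_supported = 0x%" PRIx64 "\n",
	       (port_offloads ^ requested) & port_supported);
	return 0;
}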

Konstantin

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
  2018-03-20 11:53         ` Ananyev, Konstantin
@ 2018-03-21 14:03           ` Dai, Wei
  0 siblings, 0 replies; 28+ messages in thread
From: Dai, Wei @ 2018-03-21 14:03 UTC (permalink / raw)
  To: Ananyev, Konstantin, Lu, Wenzhuo; +Cc: dev

Hi, Konstantin
Thanks for your patient guidance!
> IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); in ixgbe_dev_rx_init().
> > Same case is also in the code line: IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl); in ixgbevf_dev_rx_init().
> 
> Yes, HW can enable/disable it on a per-queue basis.
> Though it affects Rx function selection, and right now we have one Rx
> function per device - that's why it looks to me more like a per-port offload.
> Though I believe these days the ixgbe PMD doesn't support it properly anyway
> (we always set rxd.hdr_addr to zero) - so it is probably better to remove it altogether.
> 
Yes, Rx function selection is related to offloading.
I'll remove this header split capability in my next patch set.

> >
> > > > +static int
> > > > +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t
> > > > +requested) {
> > > > +	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> > > > +	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> > > > +	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> > > > +
> > > > +	if ((requested & (queue_supported | port_supported)) != requested)
> > > > +		return 0;
> > > > +
> > > > +	if ((port_offloads ^ requested) & port_supported)
> > >
> > > Could you explain a bit more what are you cheking here?
> > > As I can see:
> > >  (port_offloads ^ requested) - that's a diff between already set and
> > > newly requested offloads.
> > > Then you check if that diff consists of supported by port offloads,
> > > and if yes you return an error?
> > > Konstantin
> > >
> > This function is similar to mlx4_check_rx_queue_offloads() in mlx4 driver.
> > As the git log message in the commit
> > ce17eddefc20285bbfe575bdc07f42f0b20f34cb say that a per port
> > capability should has same setting (enabling or disabling) on both port
> configuration via rte_eth_dev_configure( ) and queue configuration via
> rte_eth_rx_queue_setup( ).
> > This function check if this requirement is matched or not.
> > It also check offloading request is supported as a per port or a per queue
> capability or not.
> > If above checking is pass, it return 1 else return 0.
> 
> OK, let me be more specific here.
> Let's say:
> requested == DEV_RX_OFFLOAD_VLAN_STRIP;
> port_offloads == DEV_RX_OFFLOAD_IPV4_CKSUM;
> port_supported = (DEV_RX_OFFLOAD_IPV4_CKSUM  |
> 		   DEV_RX_OFFLOAD_UDP_CKSUM   |
> 		   DEV_RX_OFFLOAD_TCP_CKSUM   |
> 		   DEV_RX_OFFLOAD_CRC_STRIP   |
> 		   DEV_RX_OFFLOAD_JUMBO_FRAME |
> 		   DEV_RX_OFFLOAD_SCATTER);
> 
> (port_offloads ^ requested) == DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM;
> (port_offloads ^ requested) & port_supported == DEV_RX_OFFLOAD_IPV4_CKSUM;
> And that function will return failure, while as I understand it shouldn't - the requested queue offload is valid.
> 
> Konstantin

I'd like to quote the git message of commit ce17eddefc20285bbfe575bdc07f42f0b20f34cb,
which was submitted by Shahaf Shuler and has already been accepted.
SHA-1: ce17eddefc20285bbfe575bdc07f42f0b20f34cb

* ethdev: introduce Rx queue offloads API

Introduce a new API to configure Rx offloads.

In the new API, offloads are divided into per-port and per-queue
offloads. The PMD reports capability for each of them.
Offloads are enabled using the existing DEV_RX_OFFLOAD_* flags.
To enable per-port offload, the offload should be set on both device
configuration and queue configuration. To enable per-queue offload, the
offloads can be set only on queue configuration.

Applications should set the ignore_offload_bitfield bit on rxmode
structure in order to move to the new API.

The old Rx offloads API is kept for the meanwhile, in order to enable a
smooth transition for PMDs and application to the new API.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

In your example, IPV4_CKSUM is a per-port offload: it is
enabled in the port_offloads passed to rte_eth_dev_configure(), so it
should also be enabled in the offloads requested via rte_eth_rx_queue_setup().
So your example fails this check, as intended.
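
To make the contract concrete, here is a minimal application-side
sketch (port_id, nb_rxd, socket_id and mb_pool are assumed to be
defined elsewhere; error checking omitted):

struct rte_eth_conf port_conf = { 0 };
struct rte_eth_rxconf rxq_conf;
struct rte_eth_dev_info dev_info;

/* opt in to the new offloads API */
port_conf.rxmode.ignore_offload_bitfield = 1;
/* per-port offload: must be requested at configure time ... */
port_conf.rxmode.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM;
rte_eth_dev_configure(port_id, 1, 1, &port_conf);

rte_eth_dev_info_get(port_id, &dev_info);
rxq_conf = dev_info.default_rxconf;
/* ... and repeated at queue setup; a per-queue offload such as
 * DEV_RX_OFFLOAD_VLAN_STRIP may be added here alone. */
rxq_conf.offloads = port_conf.rxmode.offloads |
		    DEV_RX_OFFLOAD_VLAN_STRIP;
rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id,
		       &rxq_conf, mb_pool);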

This function is very similar to priv_is_rx_queue_offloads_allowed() in drivers/net/mlx5/mlx5_rxq.c
in the patch http://dpdk.org/dev/patchwork/patch/33386/, which has already been accepted.
 

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new offloads API
  2018-03-19  7:04   ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
                       ` (3 preceding siblings ...)
  2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 4/4] net/ixgbe: convert to new Tx " Wei Dai
@ 2018-03-22  3:40     ` Wei Dai
  2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
                         ` (4 more replies)
  4 siblings, 5 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-22  3:40 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

This patch set adds support for per-queue VLAN strip offloading
in ixgbe PF and VF.
This patch set also converts ixgbe PF and VF to the new offloads API.

---
v4: don't support header splitting any more

v3: Rx header splitting capability is only enabled under
    #ifdef RTE_HEADER_SPLIT_ENABLE.
    Tx vector transmit function only works without any Tx offloads.
   
v2: improve error checking

Wei Dai (4):
  net/ixgbe: support VLAN strip per queue offloading in PF
  net/ixgbe: support VLAN strip per queue offloading in VF
  net/ixgbe: convert to new Rx offloads API
  net/ixgbe: convert to new Tx offloads API

 drivers/net/ixgbe/ixgbe_ethdev.c          | 264 ++++++++++++++--------------
 drivers/net/ixgbe/ixgbe_ethdev.h          |   4 +-
 drivers/net/ixgbe/ixgbe_ipsec.c           |  13 +-
 drivers/net/ixgbe/ixgbe_pf.c              |   5 +-
 drivers/net/ixgbe/ixgbe_rxtx.c            | 275 +++++++++++++++++++++++-------
 drivers/net/ixgbe/ixgbe_rxtx.h            |   7 +
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   5 -
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
 8 files changed, 359 insertions(+), 216 deletions(-)

-- 
2.9.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v4 1/4] net/ixgbe: support VLAN strip per queue offloading in PF
  2018-03-22  3:40     ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
@ 2018-03-22  3:41       ` Wei Dai
  2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
                         ` (3 subsequent siblings)
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-22  3:41 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

VLAN stripping is a per-queue offload in the PF. With this patch
it can be enabled or disabled on any Rx queue in the PF.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 109 +++++++++++++++++----------------------
 drivers/net/ixgbe/ixgbe_ethdev.h |   4 +-
 drivers/net/ixgbe/ixgbe_pf.c     |   5 +-
 drivers/net/ixgbe/ixgbe_rxtx.c   |   1 +
 drivers/net/ixgbe/ixgbe_rxtx.h   |   1 +
 5 files changed, 51 insertions(+), 69 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4483258..73755d2 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2001,64 +2001,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
 }
 
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-	struct ixgbe_rx_queue *rxq;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl &= ~IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	} else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			ctrl &= ~IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-			/* record those setting for HW strip per queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
-		}
-	}
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-	struct ixgbe_rx_queue *rxq;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl |= IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	} else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
-			/* record those setting for HW strip per queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
-		}
-	}
-}
-
 static void
 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 {
@@ -2114,14 +2056,57 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	 */
 }
 
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	uint32_t ctrl;
+	uint16_t i;
+	struct ixgbe_rx_queue *rxq;
+	bool on;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			ctrl |= IXGBE_VLNCTRL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+		} else {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			ctrl &= ~IXGBE_VLNCTRL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+		}
+	} else {
+		/*
+		 * On other 10G NICs, VLAN strip can be set up
+		 * per queue in RXDCTL
+		 */
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+				ctrl |= IXGBE_RXDCTL_VME;
+				on = TRUE;
+			} else {
+				ctrl &= ~IXGBE_RXDCTL_VME;
+				on = FALSE;
+			}
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+			/* record the per-queue HW strip setting */
+			ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+		}
+	}
+}
+
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-			ixgbe_vlan_hw_strip_enable_all(dev);
-		else
-			ixgbe_vlan_hw_strip_disable_all(dev);
+		ixgbe_vlan_hw_strip_config(dev);
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c56d652..6550777 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -659,9 +659,7 @@ void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
 
 void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
 
-void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
+void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
 
 void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
 
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index ea99737..4e61310 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -329,10 +329,7 @@ set_rx_mode(struct rte_eth_dev *dev)
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-		ixgbe_vlan_hw_strip_enable_all(dev);
-	else
-		ixgbe_vlan_hw_strip_disable_all(dev);
+	ixgbe_vlan_hw_strip_config(dev);
 }
 
 static inline void
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 6c582b4..5c45eb4 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2820,6 +2820,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 							0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = rx_conf->offloads;
 
 	/*
 	 * The packet type in RX descriptor is different for different NICs.
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 69c718b..ab5f01e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -129,6 +129,7 @@ struct ixgbe_rx_queue {
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	/** flags to set in mbuf when a vlan is detected. */
 	uint64_t            vlan_flags;
+	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
-- 
2.9.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v4 2/4] net/ixgbe: support VLAN strip per queue offloading in VF
  2018-03-22  3:40     ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
  2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
@ 2018-03-22  3:41       ` Wei Dai
  2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
                         ` (2 subsequent siblings)
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-22  3:41 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

VLAN stripping is a per-queue offload in the VF. With this patch
it can be enabled or disabled on any Rx queue of the VF.
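
A minimal usage sketch (the port and queue ids are hypothetical): with
per-queue support in place, the existing ethdev helper can toggle
stripping on a single VF queue at runtime:

    uint16_t port_id = 0, queue_id = 1;   /* hypothetical ids */

    /* enable VLAN stripping on queue 1 only */
    rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, 1);
    /* ... and disable it again later */
    rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, 0);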

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 73755d2..8bb67ba 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5215,15 +5215,17 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_rx_queue *rxq;
 	uint16_t i;
 	int on = 0;
 
 	/* VF function only support hw strip feature, others are not support */
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
-		for (i = 0; i < hw->mac.max_rx_queues; i++)
+		for (i = 0; i < hw->mac.max_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
+		}
 	}
 
 	return 0;
-- 
2.9.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v4 3/4] net/ixgbe: convert to new Rx offloads API
  2018-03-22  3:40     ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
  2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
  2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
@ 2018-03-22  3:41       ` Wei Dai
  2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 4/4] net/ixgbe: convert to new Tx " Wei Dai
  2018-04-02 13:27       ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Zhang, Qi Z
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-22  3:41 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

The ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
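
With this conversion, pure port-level offloads must be requested
identically for the whole port, while queue-level offloads may differ
per queue; ixgbe_dev_configure() and ixgbe_dev_rx_queue_setup() reject
anything the hardware cannot do. A minimal application-side sketch of
the same capability check, with hypothetical configuration values:

    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf = { 0 };   /* hypothetical config */
    uint16_t port_id = 0;                    /* hypothetical port */

    rte_eth_dev_info_get(port_id, &dev_info);
    /* request checksum offload and CRC strip at port level */
    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
                                DEV_RX_OFFLOAD_CRC_STRIP;
    /* refuse unsupported bits up front, as ixgbe_dev_configure() will */
    if ((port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
            port_conf.rxmode.offloads)
        rte_exit(EXIT_FAILURE, "unsupported Rx offloads requested\n");
    rte_eth_dev_configure(port_id, 1, 1, &port_conf);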

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c          |  93 +++++++-------
 drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
 drivers/net/ixgbe/ixgbe_rxtx.c            | 195 +++++++++++++++++++++---------
 drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   5 -
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
 6 files changed, 196 insertions(+), 110 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 8bb67ba..9437f05 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		ixgbe_vlan_hw_strip_config(dev);
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	ixgbe_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM  |
-		DEV_RX_OFFLOAD_CRC_STRIP;
-
-	/*
-	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
-	 * mode.
-	 */
-	if ((hw->mac.type == ixgbe_mac_82599EB ||
-	     hw->mac.type == ixgbe_mac_X540) &&
-	    !RTE_ETH_DEV_SRIOV(dev).active)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -3675,10 +3668,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 #ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	if (dev->security_ctx)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	}
 #endif
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		dev_info->max_vmdq_pools = ETH_16_POOLS;
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM  |
-				DEV_RX_OFFLOAD_CRC_STRIP;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM  |
 				DEV_TX_OFFLOAD_UDP_CKSUM   |
@@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
+	ixgbevf_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif
 
@@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 			   uint16_t queue_idx, uint16_t tx_rate)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rf_dec, rf_int;
 	uint32_t bcnrc_val;
 	uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 		bcnrc_val = 0;
 	}
 
+	rxmode = &dev->data->dev_conf.rxmode;
 	/*
 	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
 	 * set as 0x4.
 	 */
-	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
-				IXGBE_MAX_JUMBO_FRAME_SIZE))
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
 			IXGBE_MMW_SIZE_JUMBO_FRAME);
 	else
@@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* refuse mtu that requires the support of scattered packets when this
 	 * feature has not been enabled before.
 	 */
-	if (!rx_conf->enable_scatter &&
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
 	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
 		return -EINVAL;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0f..29e4728 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
+	uint64_t rx_offloads;
 
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	/* sanity checks */
-	if (dev->data->dev_conf.rxmode.enable_lro) {
+	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+	if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
 
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5c45eb4..f6198f0 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2769,6 +2769,97 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 #endif
 }
 
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599_vf:
+	case ixgbe_mac_X540_vf:
+	case ixgbe_mac_X550_vf:
+	case ixgbe_mac_X550EM_x_vf:
+	case ixgbe_mac_X550EM_a_vf:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads = 0;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (hw->mac.type != ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		   DEV_RX_OFFLOAD_UDP_CKSUM   |
+		   DEV_RX_OFFLOAD_TCP_CKSUM   |
+		   DEV_RX_OFFLOAD_CRC_STRIP   |
+		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+		   DEV_RX_OFFLOAD_SCATTER;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	if (ixgbe_is_vf(dev) == 0)
+		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+	/*
+	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+	 * mode.
+	 */
+	if ((hw->mac.type == ixgbe_mac_82599EB ||
+	     hw->mac.type == ixgbe_mac_X540) &&
+	    !RTE_ETH_DEV_SRIOV(dev).active)
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+	return offloads;
+}
+
+static int
+ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
+	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2787,6 +2878,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			ixgbe_get_rx_port_offloads(dev),
+			ixgbe_get_rx_queue_offloads(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -2816,8 +2919,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 	rxq->offloads = rx_conf->offloads;
@@ -4575,7 +4678,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;
 
-	if (!rsc_capable && rx_conf->enable_lro) {
+	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4583,7 +4686,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-	if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+	     (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4597,7 +4701,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	/* RFCTL configuration  */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->enable_lro))
+	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		/*
 		 * Since NFS packets coalescing is not supported - clear
 		 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4610,7 +4714,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
-	if (!rx_conf->enable_lro)
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		return 0;
 
 	/* Set RDRXCTL.RSCACKC bit */
@@ -4730,7 +4834,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->hw_strip_crc)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -4738,7 +4842,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->jumbo_frame == 1) {
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 		maxfrs &= 0x0000FFFF;
@@ -4758,6 +4862,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -4766,7 +4875,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+		rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+				0 : ETHER_CRC_LEN;
 
 		/* Setup the Base and Length of the Rx Descriptor Rings */
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -4780,28 +4890,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
 
 		/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
-		/*
-		 * Configure Header Split
-		 */
-		if (rx_conf->header_split) {
-			if (hw->mac.type == ixgbe_mac_82599EB) {
-				/* Must setup the PSRTYPE register */
-				uint32_t psrtype;
-
-				psrtype = IXGBE_PSRTYPE_TCPHDR |
-					IXGBE_PSRTYPE_UDPHDR   |
-					IXGBE_PSRTYPE_IPV4HDR  |
-					IXGBE_PSRTYPE_IPV6HDR;
-				IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
-			}
-			srrctl = ((rx_conf->split_hdr_size <<
-				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-				IXGBE_SRRCTL_BSIZEHDR_MASK);
-			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-		} else
-#endif
-			srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+		srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
 		/* Set if packets are dropped when no descriptors available */
 		if (rxq->drop_en)
@@ -4827,9 +4916,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}
 
-	if (rx_conf->enable_scatter)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
 	/*
@@ -4844,7 +4935,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->hw_ip_checksum)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4854,7 +4945,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->hw_strip_crc)
+		if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -5260,6 +5351,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -5290,6 +5382,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
 	struct ixgbe_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint64_t bus_addr;
 	uint32_t srrctl, psrtype = 0;
 	uint16_t buf_size;
@@ -5329,6 +5422,11 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	ixgbevf_rlpml_set_vf(hw,
 		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5352,18 +5450,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 
 
 		/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
-		/*
-		 * Configure Header Split
-		 */
-		if (dev->data->dev_conf.rxmode.header_split) {
-			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
-				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-				IXGBE_SRRCTL_BSIZEHDR_MASK);
-			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-		} else
-#endif
-			srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+		srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
 		/* Set if packets are dropped when no descriptors available */
 		if (rxq->drop_en)
@@ -5388,24 +5475,18 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (dev->data->dev_conf.rxmode.enable_scatter ||
+		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
-		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+		    (rxmode->max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
 		}
-	}
 
-#ifdef RTE_HEADER_SPLIT_ENABLE
-	if (dev->data->dev_conf.rxmode.header_split)
-		/* Must setup the PSRTYPE register */
-		psrtype = IXGBE_PSRTYPE_TCPHDR |
-			IXGBE_PSRTYPE_UDPHDR   |
-			IXGBE_PSRTYPE_IPV4HDR  |
-			IXGBE_PSRTYPE_IPV6HDR;
-#endif
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	}
 
 	/* Set RQPL for VF RSS according to max Rx queue */
 	psrtype |= (dev->data->nb_rx_queues >> 1) <<
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ab5f01e..30095fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -307,5 +307,8 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+
 #endif /* RTE_IXGBE_INC_VECTOR */
 #endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a..a97c271 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -278,17 +278,12 @@ static inline int
 ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
 
 	/* no fdir support */
 	if (fconf->mode != RTE_FDIR_MODE_NONE)
 		return -1;
 
-	/* no header split support */
-	if (rxmode->header_split == 1)
-		return -1;
-
 	return 0;
 #else
 	RTE_SET_USED(dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998..edb1383 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
 	/* no csum error report support */
-	if (rxmode->hw_ip_checksum == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		return -1;
 
 	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
-- 
2.9.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [dpdk-dev] [PATCH v4 4/4] net/ixgbe: convert to new Tx offloads API
  2018-03-22  3:40     ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
                         ` (2 preceding siblings ...)
  2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
@ 2018-03-22  3:41       ` Wei Dai
  2018-04-02 13:27       ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Zhang, Qi Z
  4 siblings, 0 replies; 28+ messages in thread
From: Wei Dai @ 2018-03-22  3:41 UTC (permalink / raw)
  To: konstantin.ananyev, wenzhuo.lu; +Cc: dev, Wei Dai

The ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
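
Under the new API an application opts in by setting ETH_TXQ_FLAGS_IGNORE,
which tells the PMD to honour tx_conf.offloads instead of the legacy
txq_flags bits. A minimal sketch with hypothetical values:

    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf tx_conf;
    uint16_t port_id = 0;                  /* hypothetical port */

    rte_eth_dev_info_get(port_id, &dev_info);
    tx_conf = dev_info.default_txconf;
    tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;   /* use the offloads field */
    tx_conf.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
                       DEV_TX_OFFLOAD_TCP_CKSUM;
    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &tx_conf);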

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 56 +++++++++++++---------------
 drivers/net/ixgbe/ixgbe_ipsec.c  |  5 ++-
 drivers/net/ixgbe/ixgbe_rxtx.c   | 79 ++++++++++++++++++++++++++++++++++++++--
 drivers/net/ixgbe/ixgbe_rxtx.h   |  3 ++
 4 files changed, 108 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 9437f05..6288690 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2337,6 +2337,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2356,6 +2357,13 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3649,28 +3657,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-#endif
+	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3692,7 +3680,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -3776,12 +3766,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3803,7 +3789,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -4941,6 +4929,7 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 			(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
@@ -4953,6 +4942,13 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 29e4728..de7ed36 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -599,8 +599,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+
 	/* sanity checks */
 	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
@@ -634,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index f6198f0..7511e18 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+	if ((txq->offloads == 0) &&
 #ifdef RTE_LIBRTE_SECURITY
 			!(txq->using_ipsec) &&
 #endif
@@ -2398,9 +2398,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	} else {
 		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
 		PMD_INIT_LOG(DEBUG,
-				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
-				(unsigned long)txq->txq_flags,
-				(unsigned long)IXGBE_SIMPLE_FLAGS);
+				" - offloads = 0x%" PRIx64,
+				txq->offloads);
 		PMD_INIT_LOG(DEBUG,
 				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
 				(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2409,60 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	}
 }
 
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM   |
+		DEV_TX_OFFLOAD_SCTP_CKSUM  |
+		DEV_TX_OFFLOAD_TCP_TSO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+	return tx_offload_capa;
+}
+
+static int
+ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
+	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2426,6 +2479,22 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
+	 * Don't verify port offloads for applications which
+	 * use the old API.
+	 */
+	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64,
+			(void *)dev, tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			ixgbe_get_tx_queue_offloads(dev),
+			ixgbe_get_tx_port_offloads(dev));
+		return -ENOTSUP;
+	}
+
+	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
@@ -2551,6 +2620,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
+	txq->offloads = tx_conf->offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -5371,6 +5441,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
 	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.offloads = txq->offloads;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 30095fa..642cf4d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
 	uint32_t txq_flags; /**< Holds flags for this TXq */
+	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -307,8 +308,10 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
 
 #endif /* RTE_IXGBE_INC_VECTOR */
 #endif /* _IXGBE_RXTX_H_ */
-- 
2.9.5

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new offloads API
  2018-03-22  3:40     ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
                         ` (3 preceding siblings ...)
  2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 4/4] net/ixgbe: convert to new Tx " Wei Dai
@ 2018-04-02 13:27       ` Zhang, Qi Z
  2018-04-03 15:14         ` Zhang, Helin
  4 siblings, 1 reply; 28+ messages in thread
From: Zhang, Qi Z @ 2018-04-02 13:27 UTC (permalink / raw)
  To: Dai, Wei, Ananyev, Konstantin, Lu, Wenzhuo; +Cc: dev, Dai, Wei

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wei Dai
> Sent: Thursday, March 22, 2018 11:41 AM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>
> Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> Subject: [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new offloads API
> 
> This patch set adds support for per-queue VLAN strip offloading in the ixgbe
> PF and VF.
> It also converts the ixgbe PF and VF to the new offloads API.
> 
> ---
> v4: don't support header splitting any more
> 
> v3: Rx header splitting capability is only enabled in
>     #ifdef RTE_HEADER_SPLIT_ENABLE.
>     The Tx vector transmit function only works without any Tx offloads.
> 
> v2: improve error checking
> 
> Wei Dai (4):
>   net/ixgbe: support VLAN strip per queue offloading in PF
>   net/ixgbe: support VLAN strip per queue offloading in VF
>   net/ixgbe: convert to new Rx offloads API
>   net/ixgbe: convert to new Tx offloads API
> 
>  drivers/net/ixgbe/ixgbe_ethdev.c          | 264
> ++++++++++++++--------------
>  drivers/net/ixgbe/ixgbe_ethdev.h          |   4 +-
>  drivers/net/ixgbe/ixgbe_ipsec.c           |  13 +-
>  drivers/net/ixgbe/ixgbe_pf.c              |   5 +-
>  drivers/net/ixgbe/ixgbe_rxtx.c            | 275
> +++++++++++++++++++++++-------
>  drivers/net/ixgbe/ixgbe_rxtx.h            |   7 +
>  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   5 -
>  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
>  8 files changed, 359 insertions(+), 216 deletions(-)
> 
> --
> 2.9.5

It would be better to only enable per-queue VLAN strip without converting to
the new offloads API in patches 1 and 2, and then convert everything to the
new API in patches 3 and 4. But it is still OK for me.

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new offloads API
  2018-04-02 13:27       ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Zhang, Qi Z
@ 2018-04-03 15:14         ` Zhang, Helin
  0 siblings, 0 replies; 28+ messages in thread
From: Zhang, Helin @ 2018-04-03 15:14 UTC (permalink / raw)
  To: Zhang, Qi Z, Dai, Wei, Ananyev, Konstantin, Lu, Wenzhuo; +Cc: dev, Dai, Wei



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zhang, Qi Z
> Sent: Monday, April 2, 2018 9:27 PM
> To: Dai, Wei; Ananyev, Konstantin; Lu, Wenzhuo
> Cc: dev@dpdk.org; Dai, Wei
> Subject: Re: [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new offloads API
> 
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wei Dai
> > Sent: Thursday, March 22, 2018 11:41 AM
> > To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Lu, Wenzhuo
> > <wenzhuo.lu@intel.com>
> > Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> > Subject: [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new offloads
> > API
> >
> > This patch set adds support for per-queue VLAN strip offloading in
> > the ixgbe PF and VF.
> > It also converts the ixgbe PF and VF to the new offloads API.
> >
> > ---
> > v4: don't support header splitting any more
> >
> > v3: Rx header splitting capability is only enabled in
> >     #ifdef RTE_HEADER_SPLIT_ENABLE.
> >     The Tx vector transmit function only works without any Tx offloads.
> >
> > v2: improve error checking
> >
> > Wei Dai (4):
> >   net/ixgbe: support VLAN strip per queue offloading in PF
> >   net/ixgbe: support VLAN strip per queue offloading in VF
> >   net/ixgbe: convert to new Rx offloads API
> >   net/ixgbe: convert to new Tx offloads API
> >
> >  drivers/net/ixgbe/ixgbe_ethdev.c          | 264
> > ++++++++++++++--------------
> >  drivers/net/ixgbe/ixgbe_ethdev.h          |   4 +-
> >  drivers/net/ixgbe/ixgbe_ipsec.c           |  13 +-
> >  drivers/net/ixgbe/ixgbe_pf.c              |   5 +-
> >  drivers/net/ixgbe/ixgbe_rxtx.c            | 275
> > +++++++++++++++++++++++-------
> >  drivers/net/ixgbe/ixgbe_rxtx.h            |   7 +
> >  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   5 -
> >  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
> >  8 files changed, 359 insertions(+), 216 deletions(-)
> >
> > --
> > 2.9.5
> 
> It would be better to only enable per-queue VLAN strip without converting to
> the new offloads API in patches 1 and 2, and then convert everything to the
> new API in patches 3 and 4. But it is still OK for me.
> 
> Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Series applied to dpdk-next-net-intel, thanks!
/Helin

^ permalink raw reply	[flat|nested] 28+ messages in thread

end of thread, other threads:[~2018-04-03 15:15 UTC | newest]

Thread overview: 28+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-02-27 16:01 [dpdk-dev] [PATCH 0/4] ixgbe: convert to new offloads API Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-14 23:18   ` Ananyev, Konstantin
2018-03-19  6:24     ` Dai, Wei
2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-14 21:47     ` Ananyev, Konstantin
2018-03-19  3:15       ` Dai, Wei
2018-03-20 11:53         ` Ananyev, Konstantin
2018-03-21 14:03           ` Dai, Wei
2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-19  7:04   ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-22  3:40     ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-04-02 13:27       ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Zhang, Qi Z
2018-04-03 15:14         ` Zhang, Helin
