From: Wei Dai <wei.dai@intel.com>
To: konstantin.ananyev@intel.com, wenzhuo.lu@intel.com
Cc: dev@dpdk.org, Wei Dai <wei.dai@intel.com>
Subject: [dpdk-dev] [PATCH v3 3/4] net/ixgbe: convert to new Rx offloads API
Date: Mon, 19 Mar 2018 15:04:22 +0800 [thread overview]
Message-ID: <1521443063-57722-4-git-send-email-wei.dai@intel.com>
In-Reply-To: <1521443063-57722-1-git-send-email-wei.dai@intel.com>
Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API: Rx offload capabilities
are now reported both per port and per queue, and requested offloads
are validated against them at configure and queue-setup time.
Signed-off-by: Wei Dai <wei.dai@intel.com>
---
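Note for reviewers (not part of the commit message): under the new API,
applications request port-wide Rx offloads through rxmode.offloads at
configure time and per-queue offloads through rte_eth_rxconf.offloads at
queue-setup time. A minimal application-side sketch follows; the queue
count, descriptor count, and the choice of DEV_RX_OFFLOAD_CHECKSUM are
illustrative assumptions, not taken from this patch.

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Illustrative Rx setup under the new offloads API. */
static int
example_rx_setup(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf conf = { .rxmode = { .offloads = 0 } };
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rxq_conf;
        int ret;

        rte_eth_dev_info_get(port_id, &dev_info);

        /* Port-wide offloads must stay within rx_offload_capa,
         * which now also includes rx_queue_offload_capa. */
        conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM &
                               dev_info.rx_offload_capa;

        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
                return ret;

        /* Queue offloads: the port-wide set, optionally extended with
         * bits from rx_queue_offload_capa (e.g. per-queue VLAN strip
         * on ixgbe). */
        rxq_conf = dev_info.default_rxconf;
        rxq_conf.offloads = conf.rxmode.offloads;

        return rte_eth_rx_queue_setup(port_id, 0, 512,
                                      rte_eth_dev_socket_id(port_id),
                                      &rxq_conf, mb_pool);
}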
drivers/net/ixgbe/ixgbe_ethdev.c | 93 +++++++++--------
drivers/net/ixgbe/ixgbe_ipsec.c | 8 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 165 +++++++++++++++++++++++++++---
drivers/net/ixgbe/ixgbe_rxtx.h | 3 +
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 2 +-
6 files changed, 207 insertions(+), 66 deletions(-)
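A second reviewer note: the queue-setup check added in ixgbe_rxtx.c
(ixgbe_check_rx_queue_offloads) enforces two rules: every requested bit
must be a known capability, and port-level offload bits cannot be
toggled per queue, i.e. the request must agree with the port
configuration on all of them. A restatement of the same logic with
descriptive names (the helper and parameter names here are illustrative
only, not from the patch):

/* Same logic as ixgbe_check_rx_queue_offloads(), renamed for clarity. */
static int
rx_queue_offloads_valid(uint64_t requested,  /* rx_conf->offloads */
                        uint64_t port_conf,  /* rxmode.offloads */
                        uint64_t queue_capa, /* queue capabilities */
                        uint64_t port_capa)  /* port capabilities */
{
        /* Every requested bit must be a known capability. */
        if ((requested & (queue_capa | port_capa)) != requested)
                return 0;

        /* Port-level bits must match the port configuration:
         * a pure port offload cannot differ per queue. */
        if ((port_conf ^ requested) & port_capa)
                return 0;

        return 1;
}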
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 8bb67ba..9437f05 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
static int
ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
if (mask & ETH_VLAN_STRIP_MASK) {
ixgbe_vlan_hw_strip_config(dev);
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
ixgbe_vlan_hw_filter_enable(dev);
else
ixgbe_vlan_hw_filter_disable(dev);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
ixgbe_vlan_hw_extend_enable(dev);
else
ixgbe_vlan_hw_extend_disable(dev);
@@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint64_t rx_offloads;
int ret;
PMD_INIT_FUNC_TRACE();
@@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ ixgbe_dev_info_get(dev, &dev_info);
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Rx offloads are not supported, "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
+
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
-
- /*
- * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
- * mode.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540) &&
- !RTE_ETH_DEV_SRIOV(dev).active)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -3675,10 +3668,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
#ifdef RTE_LIBRTE_SECURITY
- if (dev->security_ctx) {
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+ if (dev->security_ctx)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
- }
#endif
dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
}
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint64_t rx_offloads;
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
+ ixgbevf_dev_info_get(dev, &dev_info);
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Rx offloads are not supported, "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
+
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.hw_strip_crc) {
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
#endif
@@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode;
uint32_t rf_dec, rf_int;
uint32_t bcnrc_val;
uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
bcnrc_val = 0;
}
+ rxmode = &dev->data->dev_conf.rxmode;
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
* set as 0x4.
*/
- if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
- IXGBE_MAX_JUMBO_FRAME_SIZE))
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+ (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
IXGBE_MMW_SIZE_JUMBO_FRAME);
else
@@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!rx_conf->enable_scatter &&
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0f..29e4728 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
+ uint64_t rx_offloads;
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
/* sanity checks */
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
@@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
if (reg != 0) {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5c45eb4..5e8635b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2769,6 +2769,100 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
#endif
}
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+#ifdef RTE_HEADER_SPLIT_ENABLE
+ offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
+#endif
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ if (ixgbe_is_vf(dev) == 0)
+ offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+ /*
+ * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+ * mode.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) &&
+ !RTE_ETH_DEV_SRIOV(dev).active)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+ return offloads;
+}
+
+static int
+ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+ uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
+ uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
+
+ if ((requested & (queue_supported | port_supported)) != requested)
+ return 0;
+
+ if ((port_offloads ^ requested) & port_supported)
+ return 0;
+
+ return 1;
+}
+
int __attribute__((cold))
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2787,6 +2881,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported port offloads 0x%" PRIx64
+ " or supported queue offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ ixgbe_get_rx_port_offloads(dev),
+ ixgbe_get_rx_queue_offloads(dev));
+ return -ENOTSUP;
+ }
+
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -2816,8 +2922,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->offloads = rx_conf->offloads;
@@ -4575,7 +4681,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;
- if (!rsc_capable && rx_conf->enable_lro) {
+ if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
"support it");
return -EINVAL;
@@ -4583,7 +4689,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+ (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter of 4.6.7.2.1 of the Spec Rev.
* 3.0 RSC configuration requires HW CRC stripping being
@@ -4597,7 +4704,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RFCTL configuration */
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
- if ((rsc_capable) && (rx_conf->enable_lro))
+ if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
/*
* Since NFS packets coalescing is not supported - clear
* RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4610,7 +4717,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
- if (!rx_conf->enable_lro)
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
return 0;
/* Set RDRXCTL.RSCACKC bit */
@@ -4730,7 +4837,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rx_conf->hw_strip_crc)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
else
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -4738,7 +4845,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure jumbo frame support, if any.
*/
- if (rx_conf->jumbo_frame == 1) {
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
@@ -4758,6 +4865,12 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rx_conf->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -4766,7 +4879,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+ rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+ 0 : ETHER_CRC_LEN;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
@@ -4784,7 +4898,9 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure Header Split
*/
- if (rx_conf->header_split) {
+ if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+ /* add Header Split flag for set_rx_function() */
+ rx_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
if (hw->mac.type == ixgbe_mac_82599EB) {
/* Must setup the PSRTYPE register */
uint32_t psrtype;
@@ -4827,9 +4943,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
}
- if (rx_conf->enable_scatter)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
/*
@@ -4844,7 +4962,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
- if (rx_conf->hw_ip_checksum)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= IXGBE_RXCSUM_IPPCSE;
else
rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4854,7 +4972,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rx_conf->hw_strip_crc)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
else
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -5260,6 +5378,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -5290,6 +5409,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
struct ixgbe_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint64_t bus_addr;
uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
@@ -5329,6 +5449,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
ixgbevf_rlpml_set_vf(hw,
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rxmode->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -5356,7 +5482,9 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure Header Split
*/
- if (dev->data->dev_conf.rxmode.header_split) {
+ if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+ /* add Header Split flag for set_rx_function() */
+ rxmode->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK);
@@ -5388,18 +5516,21 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.enable_scatter ||
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
- (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ (rxmode->max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
}
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
}
#ifdef RTE_HEADER_SPLIT_ENABLE
- if (dev->data->dev_conf.rxmode.header_split)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
/* Must setup the PSRTYPE register */
psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ab5f01e..30095fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -307,5 +307,8 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+
#endif /* RTE_IXGBE_INC_VECTOR */
#endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a..d3eb060 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -286,7 +286,7 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
return -1;
/* no header split support */
- if (rxmode->header_split == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998..edb1383 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
/* no csum error report support */
- if (rxmode->hw_ip_checksum == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
return -1;
return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
--
2.7.5
Thread overview: 28+ messages
2018-02-27 16:01 [dpdk-dev] [PATCH 0/4] ixgbe: convert to new " Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-14 23:18 ` Ananyev, Konstantin
2018-03-19 6:24 ` Dai, Wei
2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-14 21:47 ` Ananyev, Konstantin
2018-03-19 3:15 ` Dai, Wei
2018-03-20 11:53 ` Ananyev, Konstantin
2018-03-21 14:03 ` Dai, Wei
2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-19 7:04 ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
2018-03-19 7:04 ` [dpdk-dev] [PATCH v3 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-19 7:04 ` [dpdk-dev] [PATCH v3 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-19 7:04 ` Wei Dai [this message]
2018-03-19 7:04 ` [dpdk-dev] [PATCH v3 4/4] net/ixgbe: convert to new Tx offloads API Wei Dai
2018-03-22 3:40 ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
2018-03-22 3:41 ` [dpdk-dev] [PATCH v4 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-22 3:41 ` [dpdk-dev] [PATCH v4 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-22 3:41 ` [dpdk-dev] [PATCH v4 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-22 3:41 ` [dpdk-dev] [PATCH v4 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-04-02 13:27 ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Zhang, Qi Z
2018-04-03 15:14 ` Zhang, Helin