From: Wei Dai
To: wenzhuo.lu@intel.com, konstantin.ananyev@intel.com
Cc: dev@dpdk.org, Wei Dai
Date: Wed, 7 Mar 2018 21:06:29 +0800
Message-Id: <1520427990-73471-4-git-send-email-wei.dai@intel.com>
X-Mailer: git-send-email 2.7.5
In-Reply-To: <1520427990-73471-1-git-send-email-wei.dai@intel.com>
References: <1519747291-6969-1-git-send-email-wei.dai@intel.com>
 <1520427990-73471-1-git-send-email-wei.dai@intel.com>
Subject: [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API

Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.

Signed-off-by: Wei Dai
---
 drivers/net/ixgbe/ixgbe_ethdev.c          |  93 +++++++++--------
 drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
 drivers/net/ixgbe/ixgbe_rxtx.c            | 163 ++++++++++++++++++++++++++----
 drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
 6 files changed, 205 insertions(+), 66 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 8bb67ba..9437f05 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
 static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		ixgbe_vlan_hw_strip_config(dev);
 	}

 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			ixgbe_vlan_hw_filter_enable(dev);
 		else
 			ixgbe_vlan_hw_filter_disable(dev);
 	}

 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			ixgbe_vlan_hw_extend_enable(dev);
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
@@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 	int ret;

 	PMD_INIT_FUNC_TRACE();
@@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}

+	ixgbe_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_CRC_STRIP;
-
-	/*
-	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
-	 * mode.
-	 */
-	if ((hw->mac.type == ixgbe_mac_82599EB ||
-	     hw->mac.type == ixgbe_mac_X540) &&
-	    !RTE_ETH_DEV_SRIOV(dev).active)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);

 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -3675,10 +3668,8 @@
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

 #ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx) {
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	if (dev->security_ctx)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-	}
 #endif

 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};

 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		dev_info->max_vmdq_pools = ETH_16_POOLS;
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM |
-				DEV_RX_OFFLOAD_TCP_CKSUM |
-				DEV_RX_OFFLOAD_CRC_STRIP;
+	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM |
 				DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		},
 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};

 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)

 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct ixgbe_adapter *adapter =
 			(struct ixgbe_adapter *)dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;

 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);

+	ixgbevf_dev_info_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	/*
 	 * VF has no ability to enable/disable HW CRC
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif

@@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 			   uint16_t queue_idx, uint16_t tx_rate)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rf_dec, rf_int;
 	uint32_t bcnrc_val;
 	uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 		bcnrc_val = 0;
 	}

+	rxmode = &dev->data->dev_conf.rxmode;
 	/*
 	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
 	 * set as 0x4.
 	 */
-	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
-	     IXGBE_MAX_JUMBO_FRAME_SIZE))
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
 			IXGBE_MMW_SIZE_JUMBO_FRAME);
 	else
@@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* refuse mtu that requires the support of scattered packets when this
 	 * feature has not been enabled before.
 	 */
-	if (!rx_conf->enable_scatter &&
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
 	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
 		return -EINVAL;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0f..29e4728 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
+	uint64_t rx_offloads;

+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	/* sanity checks */
-	if (dev->data->dev_conf.rxmode.enable_lro) {
+	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
 		return -1;
 	}
-	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+	if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
 		return -1;
 	}
@@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
 		if (reg != 0) {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5c45eb4..a5d4822 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2769,6 +2769,98 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 #endif
 }

+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599_vf:
+	case ixgbe_mac_X540_vf:
+	case ixgbe_mac_X550_vf:
+	case ixgbe_mac_X550EM_x_vf:
+	case ixgbe_mac_X550EM_a_vf:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;
+	if (hw->mac.type != ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+		   DEV_RX_OFFLOAD_UDP_CKSUM |
+		   DEV_RX_OFFLOAD_TCP_CKSUM |
+		   DEV_RX_OFFLOAD_CRC_STRIP |
+		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+		   DEV_RX_OFFLOAD_SCATTER;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	if (ixgbe_is_vf(dev) == 0)
+		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+	/*
+	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+	 * mode.
+	 */
+	if ((hw->mac.type == ixgbe_mac_82599EB ||
+	     hw->mac.type == ixgbe_mac_X540) &&
+	    !RTE_ETH_DEV_SRIOV(dev).active)
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+	return offloads;
+}
+
+static int
+ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+	uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
+	uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int __attribute__((cold))
 ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2787,6 +2879,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

+	if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			ixgbe_get_rx_port_offloads(dev),
+			ixgbe_get_rx_queue_offloads(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -2816,8 +2920,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 	rxq->offloads = rx_conf->offloads;
@@ -4575,7 +4679,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 		rsc_capable = true;

-	if (!rsc_capable && rx_conf->enable_lro) {
+	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
 				   "support it");
 		return -EINVAL;
@@ -4583,7 +4687,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)

 	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */

-	if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+	    (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
 		/*
 		 * According to chapter of 4.6.7.2.1 of the Spec Rev.
 		 * 3.0 RSC configuration requires HW CRC stripping being
@@ -4597,7 +4702,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)

 	/* RFCTL configuration */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-	if ((rsc_capable) && (rx_conf->enable_lro))
+	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		/*
 		 * Since NFS packets coalescing is not supported - clear
 		 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4610,7 +4715,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);

 	/* If LRO hasn't been requested - we are done here. */
-	if (!rx_conf->enable_lro)
+	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
 		return 0;

 	/* Set RDRXCTL.RSCACKC bit */
@@ -4730,7 +4835,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->hw_strip_crc)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -4738,7 +4843,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (rx_conf->jumbo_frame == 1) {
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 		maxfrs &= 0x0000FFFF;
@@ -4758,6 +4863,12 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)

 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rx_conf->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+			       DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -4766,7 +4877,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+		rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+					0 : ETHER_CRC_LEN;

 		/* Setup the Base and Length of the Rx Descriptor Rings */
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -4784,7 +4896,9 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure Header Split
 		 */
-		if (rx_conf->header_split) {
+		if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+			/* add Header Split flag for set_rx_function() */
+			rx_conf->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
 			if (hw->mac.type == ixgbe_mac_82599EB) {
 				/* Must setup the PSRTYPE register */
 				uint32_t psrtype;
@@ -4827,9 +4941,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					2 * IXGBE_VLAN_TAG_SIZE > buf_size)
 			dev->data->scattered_rx = 1;
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}

-	if (rx_conf->enable_scatter)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;

 	/*
@@ -4844,7 +4960,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->hw_ip_checksum)
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4854,7 +4970,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB ||
 	    hw->mac.type == ixgbe_mac_X540) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->hw_strip_crc)
+		if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -5260,6 +5376,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }

 void
@@ -5290,6 +5407,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw;
 	struct ixgbe_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	uint64_t bus_addr;
 	uint32_t srrctl, psrtype = 0;
 	uint16_t buf_size;
@@ -5329,6 +5447,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	ixgbevf_rlpml_set_vf(hw,
 		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);

+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rxmode->offloads &= ~(DEV_RX_OFFLOAD_HEADER_SPLIT |
+			      DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* Setup RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
@@ -5356,7 +5480,9 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure Header Split
 		 */
-		if (dev->data->dev_conf.rxmode.header_split) {
+		if (rxq->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
+			/* add Header Split flag for set_rx_function() */
+			rxmode->offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
 			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
 				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 				IXGBE_SRRCTL_BSIZEHDR_MASK);
@@ -5388,18 +5514,21 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);

-		if (dev->data->dev_conf.rxmode.enable_scatter ||
+		if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
 		    /* It adds dual VLAN length for supporting dual VLAN */
-		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+		    (rxmode->max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
 		}
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	}

 #ifdef RTE_HEADER_SPLIT_ENABLE
-	if (dev->data->dev_conf.rxmode.header_split)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
 		/* Must setup the PSRTYPE register */
 		psrtype = IXGBE_PSRTYPE_TCPHDR |
 			  IXGBE_PSRTYPE_UDPHDR |
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ab5f01e..30095fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -307,5 +307,8 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);

+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+
 #endif /* RTE_IXGBE_INC_VECTOR */

 #endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a..d3eb060 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -286,7 +286,7 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
 		return -1;

 	/* no header split support */
-	if (rxmode->header_split == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
 		return -1;

 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998..edb1383 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

 	/* no csum error report support */
-	if (rxmode->hw_ip_checksum == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		return -1;

 	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
-- 
2.7.5
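
For reference, not part of the patch: a minimal sketch of how an application
might request Rx offloads under the new API that this patch implements for
ixgbe. It assumes the DPDK 18.02-era ethdev names used above
(rte_eth_rxmode.offloads, rte_eth_rxconf.offloads, DEV_RX_OFFLOAD_*); the
function name, queue count, and descriptor count are hypothetical, and error
handling is trimmed.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical helper: configure one port and one Rx queue, negotiating
 * offloads against what the PMD reports via rte_eth_dev_info_get().
 */
static int
setup_rx_offloads(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf;
	struct rte_eth_rxconf rxconf;
	uint64_t wanted = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_CRC_STRIP;
	int ret;

	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_info_get(port_id, &dev_info);

	/* Request only what the port advertises; ixgbe_dev_configure()
	 * above returns -ENOTSUP for anything outside rx_offload_capa.
	 */
	conf.rxmode.offloads = wanted & dev_info.rx_offload_capa;
	/* Use the offloads field rather than the old rxmode bitfield flags. */
	conf.rxmode.ignore_offload_bitfield = 1;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* Per-queue offloads must repeat the port-level ones; only bits in
	 * rx_queue_offload_capa (e.g. VLAN strip, header split here) may
	 * legitimately differ per queue, which is exactly what
	 * ixgbe_check_rx_queue_offloads() enforces in rx_queue_setup.
	 */
	rxconf = dev_info.default_rxconf;
	rxconf.offloads = conf.rxmode.offloads;

	return rte_eth_rx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mb_pool);
}

The split between ixgbe_get_rx_port_offloads() and
ixgbe_get_rx_queue_offloads() in the patch is what makes the last step
work: a queue may add VLAN strip on top of the port configuration, but it
cannot silently drop a port-wide offload such as CRC strip.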