From mboxrd@z Thu Jan 1 00:00:00 1970
Received: from mga03.intel.com (mga03.intel.com [134.134.136.65])
	by dpdk.org (Postfix) with ESMTP id 9C6148DAE
	for ; Fri, 16 Oct 2015 15:05:51 +0200 (CEST)
Received: from orsmga003.jf.intel.com ([10.7.209.27])
	by orsmga103.jf.intel.com with ESMTP; 16 Oct 2015 06:05:50 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.17,689,1437462000"; d="scan'208";a="665699042"
Received: from shvmail01.sh.intel.com ([10.239.29.42])
	by orsmga003.jf.intel.com with ESMTP; 16 Oct 2015 06:05:50 -0700
Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com [10.239.29.89])
	by shvmail01.sh.intel.com with ESMTP id t9GD5m5S026105;
	Fri, 16 Oct 2015 21:05:48 +0800
Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1])
	by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP
	id t9GD5js1012841; Fri, 16 Oct 2015 21:05:47 +0800
Received: (from wenzhuol@localhost)
	by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id t9GD5j4n012837;
	Fri, 16 Oct 2015 21:05:45 +0800
From: Wenzhuo Lu
To: dev@dpdk.org
Date: Fri, 16 Oct 2015 21:05:38 +0800
Message-Id: <1445000741-12799-2-git-send-email-wenzhuo.lu@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1445000741-12799-1-git-send-email-wenzhuo.lu@intel.com>
References: <1443426751-4906-1-git-send-email-wenzhuo.lu@intel.com>
	<1445000741-12799-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/4] ixgbe: 512 entries RSS table on x550
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: patches and discussions about DPDK
X-List-Received-Date: Fri, 16 Oct 2015 13:05:52 -0000

Compared with the older NICs, the RSS redirection table on x550 is
enlarged to 512 entries. The original code only handles NICs with a
128-entry RSS table, so on x550 only part of the table gets
initialized and RSS does not redirect packets evenly. This patch
configures the entries beyond 128 on x550 so that RSS works as
expected, and updates the RETA query and update functions to support
512 entries.
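
For context (this sketch is not part of the patch): an application
would consume the larger table through the existing rte_ethdev RETA
API roughly as below. The helper name spread_reta and its nb_queues
parameter are made up for illustration; error handling is omitted.

    /* Sketch only: spread nb_queues RX queues across whatever RETA
     * size the port reports (128 on older ixgbe NICs, 512 on x550).
     * Assumes port_id and nb_queues are valid. */
    #include <string.h>
    #include <rte_ethdev.h>

    static int
    spread_reta(uint8_t port_id, uint16_t nb_queues)
    {
    	struct rte_eth_dev_info dev_info;
    	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
    						  RTE_RETA_GROUP_SIZE];
    	uint16_t i;

    	memset(reta_conf, 0, sizeof(reta_conf));
    	rte_eth_dev_info_get(port_id, &dev_info);

    	/* dev_info.reta_size is what ixgbe_reta_size_get() reports. */
    	for (i = 0; i < dev_info.reta_size; i++) {
    		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
    			1ULL << (i % RTE_RETA_GROUP_SIZE);
    		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
    			i % nb_queues;
    	}

    	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
    					   dev_info.reta_size);
    }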
Signed-off-by: Wenzhuo Lu
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 57 +++++++++++++++++++++++++++++++++-------
 drivers/net/ixgbe/ixgbe_ethdev.h |  4 +++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 10 +++++--
 3 files changed, 60 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ec2918c..480891d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2397,7 +2397,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		ETH_TXQ_FLAGS_NOOFFLOADS,
 	};
 	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 }
 
@@ -3203,12 +3203,15 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	uint32_t reta, r;
 	uint16_t idx, shift;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint16_t sp_reta_size;
+	uint32_t reta_reg;
 
 	PMD_INIT_FUNC_TRACE();
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+	if (reta_size != sp_reta_size) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)\n", reta_size, sp_reta_size);
 		return -EINVAL;
 	}
 
@@ -3219,10 +3222,11 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 			IXGBE_4_BIT_MASK);
 		if (!mask)
 			continue;
+		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
 		if (mask == IXGBE_4_BIT_MASK)
 			r = 0;
 		else
-			r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+			r = IXGBE_READ_REG(hw, reta_reg);
 		for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
 			if (mask & (0x1 << j))
 				reta |= reta_conf[idx].reta[shift + j] <<
@@ -3231,7 +3235,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 				reta |= r & (IXGBE_8_BIT_MASK <<
 						(CHAR_BIT * j));
 		}
-		IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+		IXGBE_WRITE_REG(hw, reta_reg, reta);
 	}
 
 	return 0;
@@ -3246,16 +3250,19 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 	uint32_t reta;
 	uint16_t idx, shift;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint16_t sp_reta_size;
+	uint32_t reta_reg;
 
 	PMD_INIT_FUNC_TRACE();
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+	if (reta_size != sp_reta_size) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
 			"(%d) doesn't match the number hardware can supported "
-			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)\n", reta_size, sp_reta_size);
 		return -EINVAL;
 	}
 
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
 		idx = i / RTE_RETA_GROUP_SIZE;
 		shift = i % RTE_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
@@ -3263,7 +3270,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 		if (!mask)
 			continue;
 
-		reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+		reta = IXGBE_READ_REG(hw, reta_reg);
 		for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
 			if (mask & (0x1 << j))
 				reta_conf[idx].reta[shift + j] =
@@ -5473,6 +5481,37 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev,
 	return eeprom->ops.write_buffer(hw, first, length, data);
 }
 
+uint16_t
+ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
+	switch (mac_type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		return ETH_RSS_RETA_SIZE_512;
+	case ixgbe_mac_X550_vf:
+	case ixgbe_mac_X550EM_x_vf:
+		return ETH_RSS_RETA_SIZE_64;
+	default:
+		return ETH_RSS_RETA_SIZE_128;
+	}
+}
+
+uint32_t
+ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
+	switch (mac_type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		if (reta_idx < ETH_RSS_RETA_SIZE_128)
+			return IXGBE_RETA(reta_idx >> 2);
+		else
+			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+	case ixgbe_mac_X550_vf:
+	case ixgbe_mac_X550EM_x_vf:
+		return IXGBE_VFRETA(reta_idx >> 2);
+	default:
+		return IXGBE_RETA(reta_idx >> 2);
+	}
+}
+
 static struct rte_driver rte_ixgbe_driver = {
 	.type = PMD_PDEV,
 	.init = rte_ixgbe_pmd_init,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c3d4f4f..0c669cd 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -377,6 +377,10 @@ int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 				struct rte_eth_rss_conf *rss_conf);
 
+uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type);
+
+uint32_t ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx);
+
 /*
  * Flow director function prototypes
  */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..494a6be 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2793,22 +2793,28 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
 	uint32_t reta;
 	uint16_t i;
 	uint16_t j;
+	uint16_t sp_reta_size;
+	uint32_t reta_reg;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
 	/*
 	 * Fill in redirection table
 	 * The byte-swap is needed because NIC registers are in
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < 128; i++, j++) {
+	for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
 		if (j == dev->data->nb_rx_queues)
 			j = 0;
 		reta = (reta << 8) | j;
 		if ((i & 3) == 3)
-			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+			IXGBE_WRITE_REG(hw, reta_reg,
 					rte_bswap32(reta));
 	}
-- 
1.9.3