From mboxrd@z Thu Jan  1 00:00:00 1970
From: Helin Zhang <helin.zhang@intel.com>
To: dev@dpdk.org
Date: Wed, 22 Oct 2014 19:53:30 +0800
Message-Id: <1413978810-24610-9-git-send-email-helin.zhang@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1413978810-24610-1-git-send-email-helin.zhang@intel.com>
References: <1411634427-746-1-git-send-email-helin.zhang@intel.com>
 <1413978810-24610-1-git-send-email-helin.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v3 8/8] i40evf: support of updating/querying redirection table

Support for updating and querying the RSS redirection table has been added
for the VF.

Signed-off-by: Helin Zhang <helin.zhang@intel.com>
---
 lib/librte_pmd_i40e/i40e_ethdev_vf.c | 89 ++++++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)

v2 changes:
 * Add support of updating/querying i40e reta of VF.
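A usage sketch (not part of the patch itself): how an application could
exercise the new VF reta_update/reta_query dev ops through the generic
ethdev calls rte_eth_dev_rss_reta_update()/rte_eth_dev_rss_reta_query()
introduced in this series. The helper name, port id and queue count below
are illustrative assumptions only.

#include <string.h>
#include <rte_ethdev.h>

/* Spread the 64 RETA entries round-robin over nb_rx_queues, program the
 * VF port, then read the table back through the new dev ops. */
static int
example_vf_reta_setup(uint8_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[1];
	uint16_t i;
	int ret;

	memset(reta_conf, 0, sizeof(reta_conf));
	reta_conf[0].mask = ~0ULL;	/* update all 64 entries */
	for (i = 0; i < ETH_RSS_RETA_SIZE_64; i++)
		reta_conf[0].reta[i] = i % nb_rx_queues;

	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
					  ETH_RSS_RETA_SIZE_64);
	if (ret != 0)
		return ret;

	/* Only entries whose mask bit is set are filled in by the query. */
	memset(reta_conf, 0, sizeof(reta_conf));
	reta_conf[0].mask = ~0ULL;
	return rte_eth_dev_rss_reta_query(port_id, reta_conf,
					  ETH_RSS_RETA_SIZE_64);
}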
diff --git a/lib/librte_pmd_i40e/i40e_ethdev_vf.c b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
index a381521..0e8693d 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev_vf.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
@@ -134,6 +134,12 @@ static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
 						  uint16_t tx_queue_id);
 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
 						  uint16_t tx_queue_id);
+static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size);
+static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size);
 static int i40evf_config_rss(struct i40e_vf *vf);
 static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
 				      struct rte_eth_rss_conf *rss_conf);
@@ -166,6 +172,8 @@ static struct eth_dev_ops i40evf_eth_dev_ops = {
 	.rx_queue_release = i40e_dev_rx_queue_release,
 	.tx_queue_setup = i40e_dev_tx_queue_setup,
 	.tx_queue_release = i40e_dev_tx_queue_release,
+	.reta_update = i40evf_dev_rss_reta_update,
+	.reta_query = i40evf_dev_rss_reta_query,
 	.rss_hash_update = i40evf_dev_rss_hash_update,
 	.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
 };
@@ -1611,6 +1619,87 @@ i40evf_dev_close(struct rte_eth_dev *dev)
 }
 
 static int
+i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t lut, l;
+	uint16_t i, j;
+	uint16_t idx, shift;
+	uint8_t mask;
+
+	if (reta_size != ETH_RSS_RETA_SIZE_64) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			"(%d) doesn't match the number hardware can "
+			"support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+		idx = i / RTE_BIT_WIDTH_64;
+		shift = i % RTE_BIT_WIDTH_64;
+		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+						I40E_4_BIT_MASK);
+		if (!mask)
+			continue;
+		if (mask == I40E_4_BIT_MASK)
+			l = 0;
+		else
+			l = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));
+
+		for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
+			if (mask & (0x1 << j))
+				lut |= reta_conf[idx].reta[shift + j] <<
+							(CHAR_BIT * j);
+			else
+				lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
+		}
+		I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+	}
+
+	return 0;
+}
+
+static int
+i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t lut;
+	uint16_t i, j;
+	uint16_t idx, shift;
+	uint8_t mask;
+
+	if (reta_size != ETH_RSS_RETA_SIZE_64) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			"(%d) doesn't match the number hardware can "
+			"support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+		idx = i / RTE_BIT_WIDTH_64;
+		shift = i % RTE_BIT_WIDTH_64;
+		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+						I40E_4_BIT_MASK);
+		if (!mask)
+			continue;
+
+		lut = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));
+		for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
+			if (mask & (0x1 << j))
+				reta_conf[idx].reta[shift + j] =
+					((lut >> (CHAR_BIT * j)) &
+						I40E_8_BIT_MASK);
+		}
+	}
+
+	return 0;
+}
+
+static int
 i40evf_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
 {
 	uint32_t *hash_key;
-- 
1.8.1.4
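A standalone illustration (not part of the patch): the update path packs
four 8-bit RETA entries into each 32-bit VFQF_HLUT word and preserves
unmasked bytes from the previously programmed value. pack_hlut_word() is a
hypothetical helper that only mirrors that lut/mask arithmetic; it does not
touch hardware.

#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_WORD	4	/* RETA entries per HLUT register */
#define ENTRY_MASK		0xFFu	/* one RETA entry occupies one byte */

/* Merge up to four new entries into an existing register value. */
static uint32_t
pack_hlut_word(uint32_t old_word, const uint16_t *entries, uint8_t mask)
{
	uint32_t word = 0;
	int j;

	for (j = 0; j < ENTRIES_PER_WORD; j++) {
		if (mask & (1u << j))
			word |= (uint32_t)(entries[j] & ENTRY_MASK) << (8 * j);
		else	/* keep the byte that is already programmed */
			word |= old_word & (ENTRY_MASK << (8 * j));
	}
	return word;
}

int
main(void)
{
	uint16_t entries[ENTRIES_PER_WORD] = {0, 1, 2, 3};

	/* Update entries 0 and 2 only (mask 0x5); bytes 1 and 3 keep 0xAA.
	 * Prints 0xaa02aa00. */
	printf("0x%08x\n", pack_hlut_word(0xAAAAAAAAu, entries, 0x5));
	return 0;
}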