DPDK patches and discussions
From: Helin Zhang <helin.zhang@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 3/5] ixgbe: rework of updating/querying redirection table
Date: Fri, 22 Aug 2014 16:26:07 +0800
Message-ID: <1408695969-9774-4-git-send-email-helin.zhang@intel.com>
In-Reply-To: <1408695969-9774-1-git-send-email-helin.zhang@intel.com>

As ethdev has been changed to support multiple sizes of the
redirection table, the functions for updating and querying the
redirection table need to be reworked accordingly. In addition,
the redirection table size is now reported via the
'dev_infos_get' ops, and a dedicated 'dev_infos_get'
implementation is added for the ixgbe VF.

Signed-off-by: Helin Zhang <helin.zhang@intel.com>
Reviewed-by: Jijiang Liu <jijiang.liu@intel.com>
Reviewed-by: Cunming Liang <cunming.liang@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 136 +++++++++++++++++++++++-------------
 1 file changed, 87 insertions(+), 49 deletions(-)
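
For reviewers, a minimal usage sketch (illustration only, not part of
the patch), assuming the ethdev changes from patch 1/5 of this series:
'struct rte_eth_rss_reta_entry64' carries a 64-bit 'mask' plus a group
of 64 'reta[]' entries, and the generic rte_eth_dev_rss_reta_update()
wrapper dispatches to the PMD op reworked below. The helper name and
the round-robin queue-spreading policy are hypothetical.

#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

/* Spread all RETA entries of 'port_id' evenly over 'nb_queues' RX
 * queues, using the same idx/shift decomposition as the PMD below. */
static int
example_setup_reta(uint8_t port_id, uint16_t nb_queues)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_reta_entry64 reta_conf[2]; /* up to 128 entries */
	uint16_t i, idx, shift, reta_size;

	rte_eth_dev_info_get(port_id, &dev_info);
	reta_size = dev_info.reta_size; /* ETH_RSS_RETA_SIZE_128 on ixgbe PF */
	if (nb_queues == 0 || reta_size == 0 || reta_size > 2 * 64)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		idx = i / 64;
		shift = i % 64;
		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}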

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 59122a1..4f036ec 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -118,8 +118,9 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 					     uint8_t stat_idx,
 					     uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
-				struct rte_eth_dev_info *dev_info);
-
+			       struct rte_eth_dev_info *dev_info);
+static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+				 struct rte_eth_dev_info *dev_info);
 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
@@ -144,9 +145,11 @@ static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 		struct rte_eth_pfc_conf *pfc_conf);
 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-		struct rte_eth_rss_reta *reta_conf);
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size);
 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-		struct rte_eth_rss_reta *reta_conf);
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
@@ -377,7 +380,7 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.stats_get            = ixgbevf_dev_stats_get,
 	.stats_reset          = ixgbevf_dev_stats_reset,
 	.dev_close            = ixgbevf_dev_close,
-	.dev_infos_get        = ixgbe_dev_info_get,
+	.dev_infos_get        = ixgbevf_dev_info_get,
 	.mtu_set              = ixgbevf_dev_set_mtu,
 	.vlan_filter_set      = ixgbevf_vlan_filter_set,
 	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
@@ -1948,6 +1951,35 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_UDP_CKSUM   |
 		DEV_TX_OFFLOAD_TCP_CKSUM   |
 		DEV_TX_OFFLOAD_SCTP_CKSUM;
+	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+}
+
+static void
+ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+		     struct rte_eth_dev_info *dev_info)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
+	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
+	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+	dev_info->max_vfs = dev->pci_dev->max_vfs;
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		dev_info->max_vmdq_pools = ETH_16_POOLS;
+	else
+		dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+				DEV_RX_OFFLOAD_IPV4_CKSUM |
+				DEV_RX_OFFLOAD_UDP_CKSUM  |
+				DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+				DEV_TX_OFFLOAD_IPV4_CKSUM  |
+				DEV_TX_OFFLOAD_UDP_CKSUM   |
+				DEV_TX_OFFLOAD_TCP_CKSUM   |
+				DEV_TX_OFFLOAD_SCTP_CKSUM;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -2624,38 +2656,41 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
 
 static int
 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-				struct rte_eth_rss_reta *reta_conf)
+			  struct rte_eth_rss_reta_entry64 *reta_conf,
+			  uint16_t reta_size)
 {
 	uint8_t i,j,mask;
-	uint32_t reta;
+	uint32_t reta, r;
+	uint16_t idx, shift;
 	struct ixgbe_hw *hw =
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
-	/*
-	* Update Redirection Table RETA[n],n=0...31,The redirection table has
-	* 128-entries in 32 registers
-	 */
-	for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-		if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
-			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+			"(%d) doesn't match the number supported by hardware "
+			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < reta_size; i += 4) {
+		idx = i / RTE_BIT_WIDTH_64;
+		shift = i % RTE_BIT_WIDTH_64;
+		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xf);
+		if (!mask)
+			continue;
+		if (mask == 0xf)
+			r = 0;
 		else
-			mask = (uint8_t)((reta_conf->mask_hi >>
-				(i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
-		if (mask != 0) {
-			reta = 0;
-			if (mask != 0xF)
-				reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
-
-			for (j = 0; j < 4; j++) {
-				if (mask & (0x1 << j)) {
-					if (mask != 0xF)
-						reta &= ~(0xFF << 8 * j);
-					reta |= reta_conf->reta[i + j] << 8*j;
-				}
-			}
-			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),reta);
+			r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+		for (j = 0, reta = 0; j < 4; j++) {
+			if (mask & (0x1 << j))
+				reta |= reta_conf[idx].reta[shift + j] <<
+							(CHAR_BIT * j);
+			else
+				reta |= r & (0xff << (CHAR_BIT * j));
 		}
+		IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
 	}
 
 	return 0;
@@ -2663,32 +2698,35 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 static int
 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-				struct rte_eth_rss_reta *reta_conf)
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 uint16_t reta_size)
 {
-	uint8_t i,j,mask;
+	uint8_t i, j, mask;
 	uint32_t reta;
+	uint16_t idx, shift;
 	struct ixgbe_hw *hw =
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
-	/*
-	 * Read Redirection Table RETA[n],n=0...31,The redirection table has
-	 * 128-entries in 32 registers
-	 */
-	for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-		if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
-			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
-		else
-			mask = (uint8_t)((reta_conf->mask_hi >>
-				(i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
-
-		if (mask != 0) {
-			reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
-			for (j = 0; j < 4; j++) {
-				if (mask & (0x1 << j))
-					reta_conf->reta[i + j] =
-						(uint8_t)((reta >> 8 * j) & 0xFF);
-			}
+	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
+			"(%d) doesn't match the number supported by hardware "
+			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += 4) {
+		idx = i / RTE_BIT_WIDTH_64;
+		shift = i % RTE_BIT_WIDTH_64;
+		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xf);
+		if (!mask)
+			continue;
+
+		reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+		for (j = 0; j < 4; j++) {
+			if (mask & (0x1 << j))
+				reta_conf[idx].reta[shift + j] =
+					((reta >> (CHAR_BIT * j)) & 0xff);
 		}
 	}
 
-- 
1.8.1.4

Thread overview: 6+ messages
2014-08-22  8:26 [dpdk-dev] [PATCH 0/5] support of multiple sizes of " Helin Zhang
2014-08-22  8:26 ` [dpdk-dev] [PATCH 1/5] ethdev: " Helin Zhang
2014-08-22  8:26 ` [dpdk-dev] [PATCH 2/5] e1000: rework of updating/querying " Helin Zhang
2014-08-22  8:26 ` Helin Zhang [this message]
2014-08-22  8:26 ` [dpdk-dev] [PATCH 4/5] i40e: " Helin Zhang
2014-08-22  8:26 ` [dpdk-dev] [PATCH 5/5] app/testpmd: " Helin Zhang
