DPDK patches and discussions
From: Ouyang Changchun <changchun.ouyang@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 2/3] ixgbe: Implement the functionality of setting TX rate for queue or VF in IXGBE PMD
Date: Mon, 26 May 2014 15:45:30 +0800
Message-ID: <1401090331-18455-3-git-send-email-changchun.ouyang@intel.com>
In-Reply-To: <1401090331-18455-1-git-send-email-changchun.ouyang@intel.com>

This patch implements the eth_dev_ops callbacks that set the TX rate for an individual queue or for a VF in the IXGBE PMD, and restores any stored per-VF rate limits when the port is started.
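
For context, a minimal usage sketch from an application's point of view,
assuming the generic rte_eth_set_queue_rate_limit() and
rte_eth_set_vf_rate_limit() API added in patch 1/3 of this series (the port
id, rates and queue mask below are illustrative only, called after
rte_eth_dev_start()):

  #include <stdio.h>
  #include <rte_ethdev.h>

  /* Cap TX queue 0 of port 0 at 100 Mbps. */
  int ret = rte_eth_set_queue_rate_limit(0, 0, 100);
  if (ret != 0)
          printf("queue rate limit failed: %d\n", ret);

  /* Cap queues 0 and 1 of VF 1 on port 0 at 1000 Mbps each;
   * bit i of the queue mask selects queue i of that VF. */
  ret = rte_eth_set_vf_rate_limit(0, 1, 1000, 0x3);
  if (ret != 0)
          printf("VF rate limit failed: %d\n", ret);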

Signed-off-by: Ouyang Changchun <changchun.ouyang@intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 122 ++++++++++++++++++++++++++++++++++++
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h |  13 +++-
 2 files changed, 132 insertions(+), 3 deletions(-)
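
A note on the rate-factor arithmetic in ixgbe_set_queue_rate_limit() below:
RTTBCNRC holds the divisor link_speed/tx_rate as a fixed-point value with a
14-bit fraction (RF_INT integer part, RF_DEC fraction). A worked example with
illustrative numbers, assuming IXGBE_RTTBCNRC_RF_INT_SHIFT == 14 as in the
base driver headers:

  uint32_t link_speed = 10000, tx_rate = 300;   /* both in Mbps            */
  uint32_t rf_int = link_speed / tx_rate;       /* = 33                    */
  uint32_t rf_dec = link_speed % tx_rate;       /* = 100                   */
  rf_dec = (rf_dec << 14) / tx_rate;            /* = 5461, i.e. ~0.333     */
  /* bcnrc_val = RS_ENA | (rf_int << 14) | rf_dec caps the queue at
   * link_speed / (rf_int + rf_dec / 2^14) ~= 300 Mbps. */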

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index c9b5fe4..643477a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -87,6 +87,8 @@
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
 
+#define IXGBE_MMW_SIZE_DEFAULT        0x4
+#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
 
 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
 
@@ -182,6 +184,10 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
 		uint8_t	rule_id);
 
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+		uint16_t queue_idx, uint16_t tx_rate);
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+		uint16_t tx_rate, uint64_t q_msk);
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
@@ -280,6 +286,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.set_vf_rx            = ixgbe_set_pool_rx,
 	.set_vf_tx            = ixgbe_set_pool_tx,
 	.set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
+	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
+	.set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
 	.fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
 	.fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
 	.fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
@@ -1288,10 +1296,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
 	int err, link_up = 0, negotiate = 0;
 	uint32_t speed = 0;
 	int mask = 0;
 	int status;
+	uint16_t vf, idx;
 	
 	PMD_INIT_FUNC_TRACE();
 
@@ -1408,6 +1419,16 @@ skip_link_setup:
 			goto error;
 	}
 
+	/* Restore VF rate limits */
+	if (vfinfo != NULL) {
+		for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+				if (vfinfo[vf].tx_rate[idx] != 0)
+					ixgbe_set_vf_rate_limit(dev, vf,
+						vfinfo[vf].tx_rate[idx],
+						1 << idx);
+	}
+
 	ixgbe_restore_statistics_mapping(dev);
 
 	return (0);
@@ -3062,6 +3083,107 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 	return 0;
 }
 
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+	uint16_t queue_idx, uint16_t tx_rate)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t rf_dec, rf_int;
+	uint32_t bcnrc_val;
+	uint16_t link_speed = dev->data->dev_link.link_speed;
+
+	if (queue_idx >= hw->mac.max_tx_queues)
+		return -EINVAL;
+
+	if (tx_rate != 0) {
+		/* Calculate the rate factor values to set */
+		rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+		rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+		rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+		bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+				IXGBE_RTTBCNRC_RF_INT_MASK_M);
+		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+	} else {
+		bcnrc_val = 0;
+	}
+
+	/*
+	 * Set global transmit compensation time to the MMW_SIZE in the
+	 * RTTBCNRM register: MMW_SIZE=0x014 if 9728-byte jumbo frames are
+	 * supported, otherwise 0x4.
+	 */
+	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+				IXGBE_MAX_JUMBO_FRAME_SIZE))
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+			IXGBE_MMW_SIZE_JUMBO_FRAME);
+	else
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+			IXGBE_MMW_SIZE_DEFAULT);
+
+	/* Set RTTBCNRC of queue X */
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+	uint16_t tx_rate, uint64_t q_msk)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	uint32_t queue_stride =
+		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+	uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+	uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+	uint16_t total_rate = 0;
+
+	if (queue_end >= hw->mac.max_tx_queues)
+		return -EINVAL;
+
+	if (vfinfo != NULL) {
+		for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+			if (vf_idx == vf)
+				continue;
+			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+				idx++)
+				total_rate += vfinfo[vf_idx].tx_rate[idx];
+		}
+	} else
+		return -EINVAL;
+
+	/* Store tx_rate for this vf. */
+	for (idx = 0; idx < nb_q_per_pool; idx++) {
+		if (((uint64_t)0x1 << idx) & q_msk) {
+			if (vfinfo[vf].tx_rate[idx] != tx_rate)
+				vfinfo[vf].tx_rate[idx] = tx_rate;
+			total_rate += tx_rate;
+		}
+	}
+
+	if (total_rate > dev->data->dev_link.link_speed) {
+		/*
+		 * Reset the stored TX rates of this VF if the aggregate
+		 * rate would exceed the link speed.
+		 */
+		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+		return -EINVAL;
+	}
+
+	/* Set RTTBCNRC of each queue/pool for vf X  */
+	for (; queue_idx <= queue_end; queue_idx++) {
+		if (0x1 & q_msk)
+			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+		q_msk = q_msk >> 1;
+	}
+
+	return 0;
+}
 static struct rte_driver rte_ixgbe_driver = {
 	.type = PMD_PDEV,
 	.init = rte_ixgbe_pmd_init,
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index 9d7e93f..4d11105 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -64,9 +64,16 @@
 
 /* Loopback operation modes */
 /* 82599 specific loopback operation types */
-#define IXGBE_LPBK_82599_NONE		0x0 /* Default value. Loopback is disabled. */
-#define IXGBE_LPBK_82599_TX_RX		0x1 /* Tx->Rx loopback operation is enabled. */
+#define IXGBE_LPBK_82599_NONE   0x0 /* Default value. Loopback is disabled. */
+#define IXGBE_LPBK_82599_TX_RX  0x1 /* Tx->Rx loopback operation is enabled. */
 
+#define IXGBE_MAX_JUMBO_FRAME_SIZE      0x2600 /* Maximum Jumbo frame size. */
+
+#define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF
+#define IXGBE_RTTBCNRC_RF_INT_MASK_M \
+	(IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+
+#define IXGBE_MAX_QUEUE_NUM_PER_VF  8
 /*
  * Information about the fdir mode.
  */
@@ -125,7 +132,7 @@ struct ixgbe_vf_info {
 	uint16_t default_vf_vlan_id;
 	uint16_t vlans_enabled;
 	bool clear_to_send;
-	uint16_t tx_rate;
+	uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
 	uint16_t vlan_count;
 	uint8_t spoofchk_enabled;
 };
-- 
1.9.0

Thread overview: 14+ messages
2014-05-26  7:45 [dpdk-dev] [PATCH v2 0/3] Support setting TX rate for queue and VF Ouyang Changchun
2014-05-26  7:45 ` [dpdk-dev] [PATCH v2 1/3] ether: Add API to support " Ouyang Changchun
2014-05-27 22:47   ` Thomas Monjalon
2014-06-05  3:30     ` Ouyang, Changchun
2014-06-10 23:02       ` Thomas Monjalon
2014-05-26  7:45 ` Ouyang Changchun [this message]
2014-05-26  7:45 ` [dpdk-dev] [PATCH v2 3/3] testpmd: Add commands to test the functionality of setting TX rate for queue or VF Ouyang Changchun
2014-05-26 22:52 ` [dpdk-dev] [PATCH v2 0/3] Support setting TX rate for queue and VF Neil Horman
2014-06-05  3:11   ` Ouyang, Changchun
2014-06-05 11:01     ` Neil Horman
     [not found] ` <F52918179C57134FAEC9EA62FA2F9625117C89F0@shsmsx102.ccr.corp.intel.com>
2014-06-05  5:32   ` Liu, Jijiang
2014-06-05 12:41 ` Cao, Waterman
2014-06-06  6:52 ` Xie, Huawei
2014-06-11 14:05   ` Thomas Monjalon
