DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH] net/hns3: fix traffic management
@ 2021-06-21  7:38 Min Hu (Connor)
  2021-07-01 15:05 ` Andrew Rybchenko
  0 siblings, 1 reply; 2+ messages in thread
From: Min Hu (Connor) @ 2021-06-21  7:38 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit

From: Huisong Li <lihuisong@huawei.com>

In a multi-TC scenario, if the length of packets destined for different
TCs differs -- for example, 64B packets destined for TC0 and 1500B packets
destined for TC1 -- the bandwidth of the TC to which large packets are
sent is preempted by the TC to which small packets are sent on the
Kunpeng 920 network engine. As a result, the TC bandwidth is inaccurate.

To solve this problem, this patch makes the following adjustments:
1/ During initialization, firmware reports the capability bit indicating
whether the TM function is supported.
2/ A dedicated command word for configuring TC and port rate limiting is
added, instead of reusing the existing command word, so that firmware
applies the configuration to the correct module.
3/ When the PF driver is loaded, firmware completes the default
initialization of the TC and port.

Fixes: c09c7847d892 ("net/hns3: support traffic management")

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_cmd.c    |  5 ++-
 drivers/net/hns3/hns3_cmd.h    |  4 +++
 drivers/net/hns3/hns3_dcb.c    |  4 +--
 drivers/net/hns3/hns3_dcb.h    |  2 --
 drivers/net/hns3/hns3_ethdev.h |  4 +++
 drivers/net/hns3/hns3_tm.c     | 69 +++++++++++++++++++++++++++++-------------
 drivers/net/hns3/hns3_tm.h     | 12 ++++++++
 7 files changed, 74 insertions(+), 26 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 44a4e28..cdccbf5 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -427,7 +427,8 @@ hns3_get_caps_name(uint32_t caps_id)
 		{ HNS3_CAPS_STASH_B,           "stash"           },
 		{ HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
 		{ HNS3_CAPS_RAS_IMP_B,         "ras_imp"         },
-		{ HNS3_CAPS_RXD_ADV_LAYOUT_B,  "rxd_adv_layout"  }
+		{ HNS3_CAPS_RXD_ADV_LAYOUT_B,  "rxd_adv_layout"  },
+		{ HNS3_CAPS_TM_B,              "tm_capability"   }
 	};
 	uint32_t i;
 
@@ -503,6 +504,8 @@ hns3_parse_capability(struct hns3_hw *hw,
 				HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
 	if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
+	if (hns3_get_bit(caps, HNS3_CAPS_TM_B))
+		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1);
 }
 
 static uint32_t
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index eafa365..0c9b8fc 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -162,6 +162,9 @@ enum hns3_opcode_type {
 	HNS3_OPC_TM_INTERNAL_CNT        = 0x0851,
 	HNS3_OPC_TM_INTERNAL_STS_1      = 0x0852,
 
+	HNS3_OPC_TM_PORT_LIMIT_RATE     = 0x0870,
+	HNS3_OPC_TM_TC_LIMIT_RATE       = 0x0871,
+
 	/* Mailbox cmd */
 	HNS3_OPC_MBX_VF_TO_PF           = 0x2001,
 
@@ -319,6 +322,7 @@ enum HNS3_CAPS_BITS {
 	HNS3_CAPS_UDP_TUNNEL_CSUM_B,
 	HNS3_CAPS_RAS_IMP_B,
 	HNS3_CAPS_RXD_ADV_LAYOUT_B = 15,
+	HNS3_CAPS_TM_B = 17,
 };
 
 enum HNS3_API_CAP_BITS {
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 90c0d04..f15c899 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -415,7 +415,7 @@ hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
 	return hns3_cmd_send(hw, &desc, 1);
 }
 
-int
+static int
 hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
 {
 	struct hns3_shaper_parameter shaper_parameter;
@@ -551,7 +551,7 @@ hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
 	return hns3_cmd_send(hw, &desc, 1);
 }
 
-int
+static int
 hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
 {
 	struct hns3_shaper_parameter shaper_parameter;
diff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h
index f378bd4..e06ec17 100644
--- a/drivers/net/hns3/hns3_dcb.h
+++ b/drivers/net/hns3/hns3_dcb.h
@@ -209,8 +209,6 @@ int hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
 
 int hns3_update_queue_map_configure(struct hns3_adapter *hns);
 int hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed);
-int hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate);
-int hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate);
 uint8_t hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no);
 
 #endif /* _HNS3_DCB_H_ */
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 575bacd..0b11a8d 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -868,6 +868,7 @@ enum {
 	HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
 	HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B,
 	HNS3_DEV_SUPPORT_RAS_IMP_B,
+	HNS3_DEV_SUPPORT_TM_B,
 };
 
 #define hns3_dev_dcb_supported(hw) \
@@ -904,6 +905,9 @@ enum {
 #define hns3_dev_tx_push_supported(hw) \
 		hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TX_PUSH_B)
 
+#define hns3_dev_tm_supported(hw) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TM_B)
+
 #define HNS3_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct hns3_adapter *)adapter)->hw)
 #define HNS3_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index aae4970..7fd9818 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -28,8 +28,12 @@ void
 hns3_tm_conf_init(struct rte_eth_dev *dev)
 {
 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
 
+	if (!hns3_dev_tm_supported(hw))
+		return;
+
 	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
 	pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
 	pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;
@@ -50,9 +54,13 @@ void
 hns3_tm_conf_uninit(struct rte_eth_dev *dev)
 {
 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_tm_shaper_profile *shaper_profile;
 	struct hns3_tm_node *tm_node;
 
+	if (!hns3_dev_tm_supported(hw))
+		return;
+
 	if (pf->tm_conf.nb_queue_node > 0) {
 		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
 			TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
@@ -912,40 +920,39 @@ static int
 hns3_tm_config_port_rate(struct hns3_hw *hw,
 			 struct hns3_tm_shaper_profile *shaper_profile)
 {
+	struct hns3_port_limit_rate_cmd *cfg;
+	struct hns3_cmd_desc desc;
 	uint32_t firmware_rate;
 	uint64_t rate;
+	int ret;
 
 	if (shaper_profile) {
 		rate = shaper_profile->profile.peak.rate;
 		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
 	} else {
-		firmware_rate = hw->dcb_info.pg_info[0].bw_limit;
+		firmware_rate = hw->max_tm_rate;
 	}
 
-	/*
-	 * The TM shaper topology after device inited:
-	 *     pri0 shaper   --->|
-	 *     pri1 shaper   --->|
-	 *     ...               |----> pg0 shaper ----> port shaper
-	 *     ...               |
-	 *     priX shaper   --->|
-	 *
-	 * Because port shaper rate maybe changed by firmware, to avoid
-	 * concurrent configure, driver use pg0 shaper to achieve the rate limit
-	 * of port.
-	 *
-	 * The finally port rate = MIN(pg0 shaper rate, port shaper rate)
-	 */
-	return hns3_pg_shaper_rate_cfg(hw, 0, firmware_rate);
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_LIMIT_RATE, false);
+	cfg = (struct hns3_port_limit_rate_cmd *)desc.data;
+	cfg->speed = rte_cpu_to_le_32(firmware_rate);
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret)
+		hns3_err(hw, "failed to config port rate, ret = %d", ret);
+
+	return ret;
 }
 
 static int
-hns3_tm_config_tc_rate(struct hns3_hw *hw,
-		       uint8_t tc_no,
+hns3_tm_config_tc_rate(struct hns3_hw *hw, uint8_t tc_no,
 		       struct hns3_tm_shaper_profile *shaper_profile)
 {
+	struct hns3_tc_limit_rate_cmd *cfg;
+	struct hns3_cmd_desc desc;
 	uint32_t firmware_rate;
 	uint64_t rate;
+	int ret;
 
 	if (shaper_profile) {
 		rate = shaper_profile->profile.peak.rate;
@@ -954,7 +961,17 @@ hns3_tm_config_tc_rate(struct hns3_hw *hw,
 		firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
 	}
 
-	return hns3_pri_shaper_rate_cfg(hw, tc_no, firmware_rate);
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_TC_LIMIT_RATE, false);
+	cfg = (struct hns3_tc_limit_rate_cmd *)desc.data;
+	cfg->speed = rte_cpu_to_le_32(firmware_rate);
+	cfg->tc_id = tc_no;
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret)
+		hns3_err(hw, "failed to config tc (%u) rate, ret = %d",
+			 tc_no, ret);
+
+	return ret;
 }
 
 static bool
@@ -1227,12 +1244,16 @@ static const struct rte_tm_ops hns3_tm_ops = {
 };
 
 int
-hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
-		void *arg)
+hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
 {
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	if (arg == NULL)
 		return -EINVAL;
 
+	if (!hns3_dev_tm_supported(hw))
+		return -EOPNOTSUPP;
+
 	*(const void **)arg = &hns3_tm_ops;
 
 	return 0;
@@ -1243,6 +1264,9 @@ hns3_tm_dev_start_proc(struct hns3_hw *hw)
 {
 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
 
+	if (!hns3_dev_tm_supported(hw))
+		return;
+
 	if (pf->tm_conf.root && !pf->tm_conf.committed)
 		hns3_warn(hw,
 		    "please call hierarchy_commit() before starting the port.");
@@ -1289,6 +1313,9 @@ hns3_tm_conf_update(struct hns3_hw *hw)
 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
 	struct rte_tm_error error;
 
+	if (!hns3_dev_tm_supported(hw))
+		return 0;
+
 	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
 		return 0;
 
diff --git a/drivers/net/hns3/hns3_tm.h b/drivers/net/hns3/hns3_tm.h
index 1f1f8c9..83e9cc8 100644
--- a/drivers/net/hns3/hns3_tm.h
+++ b/drivers/net/hns3/hns3_tm.h
@@ -9,6 +9,18 @@
 #include <rte_tailq.h>
 #include <rte_tm_driver.h>
 
+struct hns3_port_limit_rate_cmd {
+	uint32_t speed;  /* Unit Mbps */
+	uint32_t rsvd[5];
+};
+
+struct hns3_tc_limit_rate_cmd {
+	uint32_t speed;  /* Unit Mbps */
+	uint8_t tc_id;
+	uint8_t rsvd[3];
+	uint32_t rsvd1[4];
+};
+
 enum hns3_tm_node_type {
 	HNS3_TM_NODE_TYPE_PORT,
 	HNS3_TM_NODE_TYPE_TC,
-- 
2.7.4


^ permalink raw reply	[flat|nested] 2+ messages in thread

* Re: [dpdk-dev] [PATCH] net/hns3: fix traffic management
  2021-06-21  7:38 [dpdk-dev] [PATCH] net/hns3: fix traffic management Min Hu (Connor)
@ 2021-07-01 15:05 ` Andrew Rybchenko
  0 siblings, 0 replies; 2+ messages in thread
From: Andrew Rybchenko @ 2021-07-01 15:05 UTC (permalink / raw)
  To: Min Hu (Connor), dev; +Cc: ferruh.yigit

On 6/21/21 10:38 AM, Min Hu (Connor) wrote:
> From: Huisong Li <lihuisong@huawei.com>
> 
> In a multi-TC scenario, if the length of packets destined for different
> TCs is different, for example, 64B and 1500B packets destined for TC0 and
> TC1 respectively. There is a problem that the bandwidth of the TC to which
> large packets are sent is preempted by the TC to which small packets are
> sent on the Kunpeng 920 network engine. As a result, the TC bandwidth
> accuracy is inaccurate.
> 
> To solve this problem, this patch made the following adjustments:
> 1/ During initialization, firmware reports the capability bit indicating
> whether the TM function is supported.
> 2/ The command word for configuring TC and port rate limiting is added,
> instead of reusing the existing command word. And firmware configured
> to the correct module.
> 3/ When the PF driver is loaded, firmware completes the default
> initialization of the TC and port.
> 
> Fixes: c09c7847d892 ("net/hns3: support traffic management")
> 
> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>

Applied, thanks.

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2021-07-01 15:05 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-06-21  7:38 [dpdk-dev] [PATCH] net/hns3: fix traffic management Min Hu (Connor)
2021-07-01 15:05 ` Andrew Rybchenko

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).