DPDK patches and discussions
From: Lijun Ou <oulijun@huawei.com>
To: <ferruh.yigit@intel.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH 2/8] net/hns3: support RTE TM get ops function
Date: Thu, 14 Jan 2021 21:33:31 +0800	[thread overview]
Message-ID: <1610631217-14216-3-git-send-email-oulijun@huawei.com> (raw)
In-Reply-To: <1610631217-14216-1-git-send-email-oulijun@huawei.com>

From: Chengwen Feng <fengchengwen@huawei.com>

This patch adds support for the RTE TM ops for the PF, which can be
used to:
1. configure the port's peak rate.
2. configure each TC's peak rate.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
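Usage sketch (illustrative only, not part of this patch): the new ops are
driven through the generic rte_tm API. The helper below assumes a PF port
that reports 64 Tx queues, so per the node id rules documented in hns3_tm.h
the TC0 node id is 64 and the port node id is 64 + 8 = 72; the helper name,
the profile id (1) and the 5 Gbps rate are arbitrary example values.

  #include <stdint.h>
  #include <rte_tm.h>

  static int
  example_cap_tc0(uint16_t port_id)
  {
  	struct rte_tm_error err = { 0 };
  	/* rte_tm rates are in bytes/s: 5 Gbps = 5000 Mbps * 125000 Bps */
  	struct rte_tm_shaper_params sp = { .peak = { .rate = 625000000 } };
  	struct rte_tm_node_params nonleaf = {
  		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
  		.nonleaf = { .n_sp_priorities = 1 },
  	};
  	struct rte_tm_node_params leaf = {
  		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
  		.leaf = { .wred = {
  			.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE } },
  	};
  	uint32_t tc0_node = 64, port_node = 64 + 8;

  	if (rte_tm_shaper_profile_add(port_id, 1, &sp, &err) != 0)
  		return -1;
  	/* root (port) node, then TC0 under it, then Tx queue 0 under TC0 */
  	if (rte_tm_node_add(port_id, port_node, RTE_TM_NODE_ID_NULL, 0, 1,
  			    RTE_TM_NODE_LEVEL_ID_ANY, &nonleaf, &err) != 0)
  		return -1;
  	nonleaf.shaper_profile_id = 1; /* attach the 5 Gbps profile to TC0 */
  	if (rte_tm_node_add(port_id, tc0_node, port_node, 0, 1,
  			    RTE_TM_NODE_LEVEL_ID_ANY, &nonleaf, &err) != 0)
  		return -1;
  	if (rte_tm_node_add(port_id, 0 /* txq 0 */, tc0_node, 0, 1,
  			    RTE_TM_NODE_LEVEL_ID_ANY, &leaf, &err) != 0)
  		return -1;
  	return rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);
  }

On hierarchy commit the driver programs the pg0/pri shapers as shown in the
diff below; stopping the port restores the default rates (see
hns3_tm_dev_stop_proc).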
 drivers/net/hns3/hns3_dcb.c    |  216 ++++---
 drivers/net/hns3/hns3_dcb.h    |    3 +
 drivers/net/hns3/hns3_ethdev.c |   20 +-
 drivers/net/hns3/hns3_ethdev.h |   14 +
 drivers/net/hns3/hns3_tm.c     | 1291 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/hns3/hns3_tm.h     |  103 ++++
 drivers/net/hns3/meson.build   |    3 +-
 7 files changed, 1554 insertions(+), 96 deletions(-)
 create mode 100644 drivers/net/hns3/hns3_tm.c
 create mode 100644 drivers/net/hns3/hns3_tm.h

diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index b32d5af..5aa374c 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -76,16 +76,13 @@ hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
 		shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
 	} else if (ir_calc > ir) {
 		/* Increasing the denominator to select ir_s value */
-		do {
+		while (ir_calc >= ir && ir) {
 			ir_s_calc++;
 			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
-		} while (ir_calc > ir);
+		}
 
-		if (ir_calc == ir)
-			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
-		else
-			shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
-				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
+		shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
+				    (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
 	} else {
 		/*
 		 * Increasing the numerator to select ir_u value. ir_u_calc will
@@ -320,6 +317,10 @@ hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
 {
 	uint32_t shapping_para = 0;
 
+	/* ir_b of zero means the IR is 0 Mbps, so return a zero shapping_para */
+	if (ir_b == 0)
+		return shapping_para;
+
 	hns3_dcb_set_field(shapping_para, IR_B, ir_b);
 	hns3_dcb_set_field(shapping_para, IR_U, ir_u);
 	hns3_dcb_set_field(shapping_para, IR_S, ir_s);
@@ -402,14 +403,57 @@ hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
 	return hns3_cmd_send(hw, &desc, 1);
 }
 
-static int
-hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
+int
+hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
 {
-	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct hns3_shaper_parameter shaper_parameter;
-	struct hns3_pf *pf = &hns->pf;
 	uint32_t ir_u, ir_b, ir_s;
 	uint32_t shaper_para;
+	int ret;
+
+	/* Calc shaper para */
+	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
+				    &shaper_parameter);
+	if (ret) {
+		hns3_err(hw, "calculate shaper parameter failed, ret = %d.",
+			 ret);
+		return ret;
+	}
+
+	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
+						 HNS3_SHAPER_BS_U_DEF,
+						 HNS3_SHAPER_BS_S_DEF);
+
+	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,
+				       shaper_para, rate);
+	if (ret) {
+		hns3_err(hw, "config PG CIR shaper parameter failed, ret = %d.",
+			 ret);
+		return ret;
+	}
+
+	ir_b = shaper_parameter.ir_b;
+	ir_u = shaper_parameter.ir_u;
+	ir_s = shaper_parameter.ir_s;
+	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
+						 HNS3_SHAPER_BS_U_DEF,
+						 HNS3_SHAPER_BS_S_DEF);
+
+	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,
+				       shaper_para, rate);
+	if (ret) {
+		hns3_err(hw, "config PG PIR shaper parameter failed, ret = %d.",
+			 ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
 	uint32_t rate;
 	uint8_t i;
 	int ret;
@@ -421,44 +465,9 @@ hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
 	/* Pg to pri */
 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
 		rate = hw->dcb_info.pg_info[i].bw_limit;
-
-		/* Calc shaper para */
-		ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
-					    &shaper_parameter);
-		if (ret) {
-			hns3_err(hw, "calculate shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
-
-		shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
-							 HNS3_SHAPER_BS_U_DEF,
-							 HNS3_SHAPER_BS_S_DEF);
-
-		ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
-					       shaper_para, rate);
-		if (ret) {
-			hns3_err(hw,
-				 "config PG CIR shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
-
-		ir_b = shaper_parameter.ir_b;
-		ir_u = shaper_parameter.ir_u;
-		ir_s = shaper_parameter.ir_s;
-		shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
-							 HNS3_SHAPER_BS_U_DEF,
-							 HNS3_SHAPER_BS_S_DEF);
-
-		ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
-					       shaper_para, rate);
-		if (ret) {
-			hns3_err(hw,
-				 "config PG PIR shaper parameter failed: %d",
-				 ret);
+		ret = hns3_pg_shaper_rate_cfg(hw, i, rate);
+		if (ret)
 			return ret;
-		}
 	}
 
 	return 0;
@@ -530,74 +539,75 @@ hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
 	return hns3_cmd_send(hw, &desc, 1);
 }
 
-static int
-hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
+int
+hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
 {
 	struct hns3_shaper_parameter shaper_parameter;
 	uint32_t ir_u, ir_b, ir_s;
 	uint32_t shaper_para;
-	uint32_t rate;
-	int ret, i;
+	int ret;
 
-	for (i = 0; i < hw->dcb_info.num_tc; i++) {
-		rate = hw->dcb_info.tc_info[i].bw_limit;
-		ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
-					    &shaper_parameter);
-		if (ret) {
-			hns3_err(hw, "calculate shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
+	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
+				    &shaper_parameter);
+	if (ret) {
+		hns3_err(hw, "calculate shaper parameter failed: %d.",
+			 ret);
+		return ret;
+	}
 
-		shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
-							 HNS3_SHAPER_BS_U_DEF,
-							 HNS3_SHAPER_BS_S_DEF);
+	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
+						 HNS3_SHAPER_BS_U_DEF,
+						 HNS3_SHAPER_BS_S_DEF);
 
-		ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
-						shaper_para, rate);
-		if (ret) {
-			hns3_err(hw,
-				 "config priority CIR shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
+	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,
+					shaper_para, rate);
+	if (ret) {
+		hns3_err(hw,
+			 "config priority CIR shaper parameter failed: %d.",
+			 ret);
+		return ret;
+	}
 
-		ir_b = shaper_parameter.ir_b;
-		ir_u = shaper_parameter.ir_u;
-		ir_s = shaper_parameter.ir_s;
-		shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
-							 HNS3_SHAPER_BS_U_DEF,
-							 HNS3_SHAPER_BS_S_DEF);
+	ir_b = shaper_parameter.ir_b;
+	ir_u = shaper_parameter.ir_u;
+	ir_s = shaper_parameter.ir_s;
+	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
+						 HNS3_SHAPER_BS_U_DEF,
+						 HNS3_SHAPER_BS_S_DEF);
 
-		ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
-						shaper_para, rate);
-		if (ret) {
-			hns3_err(hw,
-				 "config priority PIR shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
+	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,
+					shaper_para, rate);
+	if (ret) {
+		hns3_err(hw,
+			 "config priority PIR shaper parameter failed: %d.",
+			 ret);
+		return ret;
 	}
 
 	return 0;
 }
 
-
 static int
 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
 {
-	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
-	struct hns3_pf *pf = &hns->pf;
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	uint32_t rate;
+	uint8_t i;
 	int ret;
 
 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
 		return -EINVAL;
 
-	ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
-	if (ret)
-		hns3_err(hw, "config port shaper failed: %d", ret);
+	for (i = 0; i < hw->dcb_info.num_tc; i++) {
+		rate = hw->dcb_info.tc_info[i].bw_limit;
+		ret = hns3_pri_shaper_rate_cfg(hw, i, rate);
+		if (ret) {
+			hns3_err(hw, "config pri shaper failed: %d.", ret);
+			return ret;
+		}
+	}
 
-	return ret;
+	return 0;
 }
 
 static int
@@ -680,6 +690,26 @@ hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
 	return 0;
 }
 
+uint8_t
+hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no)
+{
+	struct hns3_tc_queue_info *tc_queue;
+	uint8_t i;
+
+	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
+		tc_queue = &hw->tc_queue[i];
+		if (!tc_queue->enable)
+			continue;
+
+		if (txq_no >= tc_queue->tqp_offset &&
+		    txq_no < tc_queue->tqp_offset + tc_queue->tqp_count)
+			return i;
+	}
+
+	/* return TC0 by default */
+	return 0;
+}
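+/*
+ * Illustrative example for the helper above: with two enabled TCs each
+ * owning four TQPs, tc_queue[1] has tqp_offset = 4 and tqp_count = 4, so
+ * txq_no 5 maps to TC1.
+ */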
+
 int
 hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
 {
diff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h
index fee23d9..8248434 100644
--- a/drivers/net/hns3/hns3_dcb.h
+++ b/drivers/net/hns3/hns3_dcb.h
@@ -209,5 +209,8 @@ int hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
 
 int hns3_dcb_cfg_update(struct hns3_adapter *hns);
 int hns3_dcb_port_shaper_cfg(struct hns3_hw *hw);
+int hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate);
+int hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate);
+uint8_t hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no);
 
 #endif /* _HNS3_DCB_H_ */
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 888338a..b6308ee 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -5,7 +5,6 @@
 #include <rte_alarm.h>
 #include <rte_bus_pci.h>
 #include <rte_ethdev_pci.h>
-#include <rte_io.h>
 #include <rte_pci.h>
 
 #include "hns3_ethdev.h"
@@ -2494,7 +2493,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	return 0;
 }
 
-static int
+int
 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
@@ -4679,6 +4678,8 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_enable_intr;
 	}
 
+	hns3_tm_conf_init(eth_dev);
+
 	return 0;
 
 err_enable_intr:
@@ -4712,6 +4713,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	hns3_tm_conf_uninit(eth_dev);
 	hns3_enable_hw_error_intr(hns, false);
 	hns3_rss_uninit(hns);
 	(void)hns3_config_gro(hw, false);
@@ -4739,6 +4741,16 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
 	if (ret)
 		return ret;
 
+	/*
+	 * hns3_dcb_cfg_update may configure the TM module, so
+	 * hns3_tm_conf_update must be called afterwards.
+	 */
+	ret = hns3_tm_conf_update(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
+		return ret;
+	}
+
 	ret = hns3_init_queues(hns, reset_queue);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
@@ -4936,6 +4948,8 @@ hns3_dev_start(struct rte_eth_dev *dev)
 	 */
 	hns3_start_tqps(hw);
 
+	hns3_tm_dev_start_proc(hw);
+
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
 }
@@ -5019,6 +5033,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 
 	rte_spinlock_lock(&hw->lock);
 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+		hns3_tm_dev_stop_proc(hw);
 		hns3_stop_tqps(hw);
 		hns3_do_stop(hns);
 		hns3_unmap_rx_interrupt(dev);
@@ -6089,6 +6104,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.fec_get_capability     = hns3_fec_get_capability,
 	.fec_get                = hns3_fec_get,
 	.fec_set                = hns3_fec_set,
+	.tm_ops_get             = hns3_tm_ops_get,
 };
 
 static const struct hns3_reset_ops hns3_reset_ops = {
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 0d86683..0d17170 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -7,12 +7,16 @@
 
 #include <sys/time.h>
 #include <rte_ethdev_driver.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_spinlock.h>
 
 #include "hns3_cmd.h"
 #include "hns3_mbx.h"
 #include "hns3_rss.h"
 #include "hns3_fdir.h"
 #include "hns3_stats.h"
+#include "hns3_tm.h"
 
 /* Vendor ID */
 #define PCI_VENDOR_ID_HUAWEI			0x19e5
@@ -727,6 +731,8 @@ struct hns3_pf {
 
 	struct hns3_fdir_info fdir; /* flow director info */
 	LIST_HEAD(counters, hns3_flow_counter) flow_counters;
+
+	struct hns3_tm_conf tm_conf;
 };
 
 struct hns3_vf {
@@ -796,6 +802,12 @@ struct hns3_adapter {
 #define HNS3_DEV_HW_TO_ADAPTER(hw) \
 	container_of(hw, struct hns3_adapter, hw)
 
+static inline struct hns3_pf *HNS3_DEV_HW_TO_PF(struct hns3_hw *hw)
+{
+	struct hns3_adapter *adapter = HNS3_DEV_HW_TO_ADAPTER(hw);
+	return &adapter->pf;
+}
+
 #define hns3_set_field(origin, mask, shift, val) \
 	do { \
 		(origin) &= (~(mask)); \
@@ -937,6 +949,8 @@ bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
 void hns3_update_link_status(struct hns3_hw *hw);
 void hns3_ether_format_addr(char *buf, uint16_t size,
 			const struct rte_ether_addr *ether_addr);
+int hns3_dev_infos_get(struct rte_eth_dev *eth_dev,
+		       struct rte_eth_dev_info *info);
 
 static inline bool
 is_reset_pending(struct hns3_adapter *hns)
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
new file mode 100644
index 0000000..d1639d4
--- /dev/null
+++ b/drivers/net/hns3/hns3_tm.c
@@ -0,0 +1,1291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020-2020 Hisilicon Limited.
+ */
+
+#include <rte_malloc.h>
+
+#include "hns3_ethdev.h"
+#include "hns3_dcb.h"
+#include "hns3_logs.h"
+#include "hns3_tm.h"
+
+static inline uint32_t
+hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
+{
+	/*
+	 * This API is called at the PCI device probe stage, where we cannot
+	 * use rte_eth_dev_info_get to get max_tx_queues (rte_eth_devices is
+	 * not set up yet), so we call hns3_dev_infos_get directly.
+	 */
+	struct rte_eth_dev_info dev_info;
+
+	memset(&dev_info, 0, sizeof(dev_info));
+	(void)hns3_dev_infos_get(dev, &dev_info);
+	return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
+}
+
+void
+hns3_tm_conf_init(struct rte_eth_dev *dev)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
+	pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
+	pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;
+
+	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+	pf->tm_conf.nb_shaper_profile = 0;
+
+	pf->tm_conf.root = NULL;
+	TAILQ_INIT(&pf->tm_conf.tc_list);
+	TAILQ_INIT(&pf->tm_conf.queue_list);
+	pf->tm_conf.nb_tc_node = 0;
+	pf->tm_conf.nb_queue_node = 0;
+
+	pf->tm_conf.committed = false;
+}
+
+void
+hns3_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_shaper_profile *shaper_profile;
+	struct hns3_tm_node *tm_node;
+
+	if (pf->tm_conf.nb_queue_node > 0) {
+		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
+			TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+			rte_free(tm_node);
+		}
+		pf->tm_conf.nb_queue_node = 0;
+	}
+
+	if (pf->tm_conf.nb_tc_node > 0) {
+		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
+			TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+			rte_free(tm_node);
+		}
+		pf->tm_conf.nb_tc_node = 0;
+	}
+
+	if (pf->tm_conf.root != NULL) {
+		rte_free(pf->tm_conf.root);
+		pf->tm_conf.root = NULL;
+	}
+
+	if (pf->tm_conf.nb_shaper_profile > 0) {
+		while ((shaper_profile =
+		       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
+			TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
+				     shaper_profile, node);
+			rte_free(shaper_profile);
+		}
+		pf->tm_conf.nb_shaper_profile = 0;
+	}
+
+	pf->tm_conf.nb_leaf_nodes_max = 0;
+	pf->tm_conf.nb_nodes_max = 0;
+	pf->tm_conf.nb_shaper_profile_max = 0;
+}
+
+static inline uint64_t
+hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
+{
+#define FIRMWARE_TO_TM_RATE_SCALE	125000
+	/* tm rate unit is Bps, firmware rate is Mbps */
+	return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
+}
+
+static inline uint32_t
+hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
+{
+#define TM_TO_FIRMWARE_RATE_SCALE	125000
+	/* tm rate unit is Bps, firmware rate is Mbps */
+	return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
+}
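+
+/*
+ * Illustrative example for the two converters above: a 10 Gbps limit is
+ * 10000 Mbps on the firmware side and 10000 * 125000 = 1250000000 Bps at
+ * the TM API, since 1 Mbps = 125000 bytes per second.
+ */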
+
+static int
+hns3_tm_capabilities_get(struct rte_eth_dev *dev,
+			 struct rte_tm_capabilities *cap,
+			 struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	if (cap == NULL || error == NULL)
+		return -EINVAL;
+
+	error->type = RTE_TM_ERROR_TYPE_NONE;
+
+	memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+	cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
+	cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
+	cap->non_leaf_nodes_identical = 1;
+	cap->leaf_nodes_identical = 1;
+	cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
+	cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
+	cap->shaper_private_dual_rate_n_max = 0;
+	cap->shaper_private_rate_min = 0;
+	cap->shaper_private_rate_max =
+		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
+	cap->shaper_shared_n_max = 0;
+	cap->shaper_shared_n_nodes_per_shaper_max = 0;
+	cap->shaper_shared_n_shapers_per_node_max = 0;
+	cap->shaper_shared_dual_rate_n_max = 0;
+	cap->shaper_shared_rate_min = 0;
+	cap->shaper_shared_rate_max = 0;
+
+	cap->sched_n_children_max = max_tx_queues;
+	cap->sched_sp_n_priorities_max = 1;
+	cap->sched_wfq_n_children_per_group_max = 0;
+	cap->sched_wfq_n_groups_max = 0;
+	cap->sched_wfq_weight_max = 1;
+
+	cap->cman_head_drop_supported = 0;
+	cap->dynamic_update_mask = 0;
+	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+	cap->cman_wred_context_n_max = 0;
+	cap->cman_wred_context_private_n_max = 0;
+	cap->cman_wred_context_shared_n_max = 0;
+	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+	cap->stats_mask = 0;
+
+	return 0;
+}
+
+static struct hns3_tm_shaper_profile *
+hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
+			      uint32_t shaper_profile_id)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_shaper_profile_list *shaper_profile_list =
+		&pf->tm_conf.shaper_profile_list;
+	struct hns3_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
+static int
+hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
+				   struct rte_tm_shaper_params *profile,
+				   struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (profile->committed.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+		error->message = "committed rate not supported";
+		return -EINVAL;
+	}
+
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+
+	if (profile->peak.rate >
+	    hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+		error->message = "peak rate too large";
+		return -EINVAL;
+	}
+
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	if (profile->packet_mode) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
+		error->message = "packet mode not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id,
+			   struct rte_tm_shaper_params *profile,
+			   struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (profile == NULL || error == NULL)
+		return -EINVAL;
+
+	if (pf->tm_conf.nb_shaper_profile >=
+	    pf->tm_conf.nb_shaper_profile_max) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "too many profiles";
+		return -EINVAL;
+	}
+
+	ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID already exists";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
+				     sizeof(struct hns3_tm_shaper_profile),
+				     0);
+	if (shaper_profile == NULL)
+		return -ENOMEM;
+
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	memcpy(&shaper_profile->profile, profile,
+	       sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+	pf->tm_conf.nb_shaper_profile++;
+
+	return 0;
+}
+
+static int
+hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id,
+			   struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_shaper_profile *shaper_profile;
+
+	if (error == NULL)
+		return -EINVAL;
+
+	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
+	if (shaper_profile == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID does not exist";
+		return -EINVAL;
+	}
+
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+	pf->tm_conf.nb_shaper_profile--;
+
+	return 0;
+}
+
+static struct hns3_tm_node *
+hns3_tm_node_search(struct rte_eth_dev *dev,
+		    uint32_t node_id,
+		    enum hns3_tm_node_type *node_type)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+	struct hns3_tm_node *tm_node;
+
+	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
+		*node_type = HNS3_TM_NODE_TYPE_PORT;
+		return pf->tm_conf.root;
+	}
+
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = HNS3_TM_NODE_TYPE_TC;
+			return tm_node;
+		}
+	}
+
+	TAILQ_FOREACH(tm_node, queue_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = HNS3_TM_NODE_TYPE_QUEUE;
+			return tm_node;
+		}
+	}
+
+	return NULL;
+}
+
+static int
+hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
+				 struct rte_tm_node_params *params,
+				 struct rte_tm_error *error)
+{
+	struct hns3_tm_shaper_profile *shaper_profile;
+
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = hns3_tm_shaper_profile_search(dev,
+				 params->shaper_profile_id);
+		if (shaper_profile == NULL) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile does not exist";
+			return -EINVAL;
+		}
+	}
+
+	if (params->nonleaf.wfq_weight_mode) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+		error->message = "WFQ not supported";
+		return -EINVAL;
+	}
+
+	if (params->nonleaf.n_sp_priorities != 1) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+		error->message = "SP priority not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
+			      struct rte_tm_node_params *params,
+			      struct rte_tm_error *error)
+
+{
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+		error->message = "shaper not supported";
+		return -EINVAL;
+	}
+
+	if (params->leaf.cman) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+		error->message = "congestion management not supported";
+		return -EINVAL;
+	}
+
+	if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+
+	if (params->leaf.wred.shared_wred_context_id) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+
+	if (params->leaf.wred.n_shared_wred_contexts) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
+			 uint32_t priority, uint32_t weight,
+			 struct rte_tm_node_params *params,
+			 struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	if (hns3_tm_node_search(dev, node_id, &node_type)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id already used";
+		return -EINVAL;
+	}
+
+	if (priority) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+		error->message = "priority should be 0";
+		return -EINVAL;
+	}
+
+	if (weight != 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+		error->message = "weight must be 1";
+		return -EINVAL;
+	}
+
+	if (params->shared_shaper_id) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+	if (params->n_shared_shapers) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+
+	if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
+		return hns3_tm_nonleaf_node_param_check(dev, params, error);
+	else
+		return hns3_tm_leaf_node_param_check(dev, params, error);
+}
+
+static int
+hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+		      uint32_t level_id, struct rte_tm_node_params *params,
+		      struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_node *tm_node;
+
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+	    level_id != HNS3_TM_NODE_LEVEL_PORT) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "wrong level";
+		return -EINVAL;
+	}
+
+	if (node_id != pf->tm_conf.nb_nodes_max - 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid port node ID";
+		return -EINVAL;
+	}
+
+	if (pf->tm_conf.root) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "already have a root";
+		return -EINVAL;
+	}
+
+	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
+	if (tm_node == NULL)
+		return -ENOMEM;
+
+	tm_node->id = node_id;
+	tm_node->reference_count = 0;
+	tm_node->parent = NULL;
+	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
+				  params->shaper_profile_id);
+	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
+	pf->tm_conf.root = tm_node;
+
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count++;
+
+	return 0;
+}
+
+static int
+hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+		    uint32_t level_id, struct hns3_tm_node *parent_node,
+		    struct rte_tm_node_params *params,
+		    struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_node *tm_node;
+
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+	    level_id != HNS3_TM_NODE_LEVEL_TC) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "wrong level";
+		return -EINVAL;
+	}
+
+	if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
+	    node_id < pf->tm_conf.nb_leaf_nodes_max ||
+	    hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid tc node ID";
+		return -EINVAL;
+	}
+
+	if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "too many TCs";
+		return -EINVAL;
+	}
+
+	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
+	if (tm_node == NULL)
+		return -ENOMEM;
+
+	tm_node->id = node_id;
+	tm_node->reference_count = 0;
+	tm_node->parent = parent_node;
+	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
+					params->shaper_profile_id);
+	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
+	TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
+	pf->tm_conf.nb_tc_node++;
+	tm_node->parent->reference_count++;
+
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count++;
+
+	return 0;
+}
+
+static int
+hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+		       uint32_t level_id, struct hns3_tm_node *parent_node,
+		       struct rte_tm_node_params *params,
+		       struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_node *tm_node;
+
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+	    level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "wrong level";
+		return -EINVAL;
+	}
+
+	/* note: dev->data->nb_tx_queues <= max_tx_queues */
+	if (node_id >= dev->data->nb_tx_queues) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid queue node ID";
+		return -EINVAL;
+	}
+
+	if (hns3_txq_mapped_tc_get(hw, node_id) !=
+	    hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "queue's TC does not match parent's TC";
+		return -EINVAL;
+	}
+
+	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
+	if (tm_node == NULL)
+		return -ENOMEM;
+
+	tm_node->id = node_id;
+	tm_node->reference_count = 0;
+	tm_node->parent = parent_node;
+	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
+	TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
+	pf->tm_conf.nb_queue_node++;
+	tm_node->parent->reference_count++;
+
+	return 0;
+}
+
+static int
+hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+		 uint32_t parent_node_id, uint32_t priority,
+		 uint32_t weight, uint32_t level_id,
+		 struct rte_tm_node_params *params,
+		 struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
+	struct hns3_tm_node *parent_node;
+	int ret;
+
+	if (params == NULL || error == NULL)
+		return -EINVAL;
+
+	if (pf->tm_conf.committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
+				       params, error);
+	if (ret)
+		return ret;
+
+	/* the root node, which has no parent */
+	if (parent_node_id == RTE_TM_NODE_ID_NULL)
+		return hns3_tm_port_node_add(dev, node_id, level_id,
+					     params, error);
+
+	parent_node = hns3_tm_node_search(dev, parent_node_id,
+					  &parent_node_type);
+	if (parent_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent does not exist";
+		return -EINVAL;
+	}
+
+	if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
+	    parent_node_type != HNS3_TM_NODE_TYPE_TC) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent is not port or TC";
+		return -EINVAL;
+	}
+
+	if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
+		return hns3_tm_tc_node_add(dev, node_id, level_id,
+					   parent_node, params, error);
+	else
+		return hns3_tm_queue_node_add(dev, node_id, level_id,
+					      parent_node, params, error);
+}
+
+static void
+hns3_tm_node_do_delete(struct hns3_pf *pf,
+		       enum hns3_tm_node_type node_type,
+		       struct hns3_tm_node *tm_node)
+{
+	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
+		if (tm_node->shaper_profile)
+			tm_node->shaper_profile->reference_count--;
+		rte_free(tm_node);
+		pf->tm_conf.root = NULL;
+		return;
+	}
+
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count--;
+	tm_node->parent->reference_count--;
+	if (node_type == HNS3_TM_NODE_TYPE_TC) {
+		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+		pf->tm_conf.nb_tc_node--;
+	} else {
+		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+		pf->tm_conf.nb_queue_node--;
+	}
+	rte_free(tm_node);
+}
+
+static int
+hns3_tm_node_delete(struct rte_eth_dev *dev,
+		    uint32_t node_id,
+		    struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
+	struct hns3_tm_node *tm_node;
+
+	if (error == NULL)
+		return -EINVAL;
+
+	if (pf->tm_conf.committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
+	if (tm_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	if (tm_node->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "cannot delete a node which has children";
+		return -EINVAL;
+	}
+
+	hns3_tm_node_do_delete(pf, node_type, tm_node);
+
+	return 0;
+}
+
+static int
+hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+		      int *is_leaf, struct rte_tm_error *error)
+{
+	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
+	struct hns3_tm_node *tm_node;
+
+	if (is_leaf == NULL || error == NULL)
+		return -EINVAL;
+
+	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
+	if (tm_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
+		*is_leaf = true;
+	else
+		*is_leaf = false;
+
+	return 0;
+}
+
+static void
+hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
+				       uint32_t level_id,
+				       struct rte_tm_level_capabilities *cap)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
+		cap->n_nodes_max = 1;
+		cap->n_nodes_nonleaf_max = 1;
+		cap->n_nodes_leaf_max = 0;
+	} else {
+		cap->n_nodes_max = HNS3_MAX_TC_NUM;
+		cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
+		cap->n_nodes_leaf_max = 0;
+	}
+
+	cap->non_leaf_nodes_identical = 1;
+	cap->leaf_nodes_identical = 1;
+
+	cap->nonleaf.shaper_private_supported = true;
+	cap->nonleaf.shaper_private_dual_rate_supported = false;
+	cap->nonleaf.shaper_private_rate_min = 0;
+	cap->nonleaf.shaper_private_rate_max =
+		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
+	cap->nonleaf.shaper_shared_n_max = 0;
+	if (level_id == HNS3_TM_NODE_LEVEL_PORT)
+		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
+	else
+		cap->nonleaf.sched_n_children_max = max_tx_queues;
+	cap->nonleaf.sched_sp_n_priorities_max = 1;
+	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+	cap->nonleaf.sched_wfq_n_groups_max = 0;
+	cap->nonleaf.sched_wfq_weight_max = 1;
+	cap->nonleaf.stats_mask = 0;
+}
+
+static void
+hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
+				    struct rte_tm_level_capabilities *cap)
+{
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	cap->n_nodes_max = max_tx_queues;
+	cap->n_nodes_nonleaf_max = 0;
+	cap->n_nodes_leaf_max = max_tx_queues;
+
+	cap->non_leaf_nodes_identical = 1;
+	cap->leaf_nodes_identical = 1;
+
+	cap->leaf.shaper_private_supported = false;
+	cap->leaf.shaper_private_dual_rate_supported = false;
+	cap->leaf.shaper_private_rate_min = 0;
+	cap->leaf.shaper_private_rate_max = 0;
+	cap->leaf.shaper_shared_n_max = 0;
+	cap->leaf.cman_head_drop_supported = false;
+	cap->leaf.cman_wred_context_private_supported = false;
+	cap->leaf.cman_wred_context_shared_n_max = 0;
+	cap->leaf.stats_mask = 0;
+}
+
+static int
+hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
+			       uint32_t level_id,
+			       struct rte_tm_level_capabilities *cap,
+			       struct rte_tm_error *error)
+{
+	if (cap == NULL || error == NULL)
+		return -EINVAL;
+
+	if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "level too deep";
+		return -EINVAL;
+	}
+
+	memset(cap, 0, sizeof(struct rte_tm_level_capabilities));
+
+	if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
+		hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
+	else
+		hns3_tm_leaf_level_capabilities_get(dev, cap);
+
+	return 0;
+}
+
+static void
+hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
+				      enum hns3_tm_node_type node_type,
+				      struct rte_tm_node_capabilities *cap)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	cap->shaper_private_supported = true;
+	cap->shaper_private_dual_rate_supported = false;
+	cap->shaper_private_rate_min = 0;
+	cap->shaper_private_rate_max =
+		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
+	cap->shaper_shared_n_max = 0;
+
+	if (node_type == HNS3_TM_NODE_TYPE_PORT)
+		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
+	else
+		cap->nonleaf.sched_n_children_max = max_tx_queues;
+	cap->nonleaf.sched_sp_n_priorities_max = 1;
+	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+	cap->nonleaf.sched_wfq_n_groups_max = 0;
+	cap->nonleaf.sched_wfq_weight_max = 1;
+
+	cap->stats_mask = 0;
+}
+
+static void
+hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+				   struct rte_tm_node_capabilities *cap)
+{
+	cap->shaper_private_supported = false;
+	cap->shaper_private_dual_rate_supported = false;
+	cap->shaper_private_rate_min = 0;
+	cap->shaper_private_rate_max = 0;
+	cap->shaper_shared_n_max = 0;
+
+	cap->leaf.cman_head_drop_supported = false;
+	cap->leaf.cman_wred_context_private_supported = false;
+	cap->leaf.cman_wred_context_shared_n_max = 0;
+
+	cap->stats_mask = 0;
+}
+
+static int
+hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
+			      uint32_t node_id,
+			      struct rte_tm_node_capabilities *cap,
+			      struct rte_tm_error *error)
+{
+	enum hns3_tm_node_type node_type;
+	struct hns3_tm_node *tm_node;
+
+	if (cap == NULL || error == NULL)
+		return -EINVAL;
+
+	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
+	if (tm_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	memset(cap, 0, sizeof(struct rte_tm_node_capabilities));
+
+	if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
+		hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
+	else
+		hns3_tm_leaf_node_capabilities_get(dev, cap);
+
+	return 0;
+}
+
+static int
+hns3_tm_config_port_rate(struct hns3_hw *hw,
+			 struct hns3_tm_shaper_profile *shaper_profile)
+{
+	uint32_t firmware_rate;
+	uint64_t rate;
+
+	if (shaper_profile) {
+		rate = shaper_profile->profile.peak.rate;
+		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
+	} else {
+		firmware_rate = hw->dcb_info.pg_info[0].bw_limit;
+	}
+
+	/*
+	 * The TM shaper topology after device init:
+	 *     pri0 shaper   --->|
+	 *     pri1 shaper   --->|
+	 *     ...               |----> pg0 shaper ----> port shaper
+	 *     ...               |
+	 *     priX shaper   --->|
+	 *
+	 * Because the port shaper rate may be changed by firmware, the driver
+	 * uses the pg0 shaper to achieve the port rate limit and so avoids
+	 * configuring the port shaper concurrently with firmware.
+	 *
+	 * The final port rate = MIN(pg0 shaper rate, port shaper rate).
+	 */
+	return hns3_pg_shaper_rate_cfg(hw, 0, firmware_rate);
+}
+
+static int
+hns3_tm_config_tc_rate(struct hns3_hw *hw,
+		       uint8_t tc_no,
+		       struct hns3_tm_shaper_profile *shaper_profile)
+{
+	uint32_t firmware_rate;
+	uint64_t rate;
+
+	if (shaper_profile) {
+		rate = shaper_profile->profile.peak.rate;
+		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
+	} else {
+		firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
+	}
+
+	return hns3_pri_shaper_rate_cfg(hw, tc_no, firmware_rate);
+}
+
+static bool
+hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	struct hns3_tm_conf *tm_conf = &pf->tm_conf;
+	struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
+	struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
+	struct hns3_tm_node *tm_node;
+
+	/* TC */
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (!tm_node->reference_count) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+			error->message = "TC without queue assigned";
+			return false;
+		}
+
+		if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
+			hw->num_tc) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "node's TC does not exist";
+			return false;
+		}
+	}
+
+	/* Queue */
+	TAILQ_FOREACH(tm_node, queue_list, node) {
+		if (tm_node->id >= hw->data->nb_tx_queues) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "node's queue invalid";
+			return false;
+		}
+
+		if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
+		    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "queue's TC does not match parent's TC";
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static int
+hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
+			    struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+	struct hns3_tm_node *tm_node;
+	uint8_t tc_no;
+	int ret;
+
+	/* port */
+	tm_node = pf->tm_conf.root;
+	if (tm_node->shaper_profile) {
+		ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
+		if (ret) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+			error->message = "failed to set port peak rate";
+			return -EIO;
+		}
+	}
+
+	/* TC */
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (tm_node->shaper_profile == NULL)
+			continue;
+
+		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
+		ret = hns3_tm_config_tc_rate(hw, tc_no,
+					     tm_node->shaper_profile);
+		if (ret) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+			error->message = "failed to set TC peak rate";
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static int
+hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
+			 int clear_on_fail,
+			 struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret;
+
+	if (error == NULL)
+		return -EINVAL;
+
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "device is resetting";
+		/* don't goto fail_clear, the user may retry later */
+		return -EBUSY;
+	}
+
+	if (pf->tm_conf.root == NULL)
+		goto done;
+
+	/* check the configuration before committing to catch any violation */
+	if (!hns3_tm_configure_check(hw, error))
+		goto fail_clear;
+
+	ret = hns3_tm_hierarchy_do_commit(hw, error);
+	if (ret)
+		goto fail_clear;
+
+done:
+	pf->tm_conf.committed = true;
+	return 0;
+
+fail_clear:
+	if (clear_on_fail) {
+		hns3_tm_conf_uninit(dev);
+		hns3_tm_conf_init(dev);
+	}
+	return -EINVAL;
+}
+
+static int
+hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
+			      int clear_on_fail,
+			      struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
+			      uint32_t node_id,
+			      enum hns3_tm_node_type node_type,
+			      struct hns3_tm_shaper_profile *shaper_profile,
+			      struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	uint8_t tc_no;
+	int ret;
+
+	if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
+		if (shaper_profile != NULL) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+			error->message = "queue node shaper not supported";
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	if (!pf->tm_conf.committed)
+		return 0;
+
+	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
+		ret = hns3_tm_config_port_rate(hw, shaper_profile);
+		if (ret) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+			error->message = "failed to update port peak rate";
+		}
+
+		return ret;
+	}
+
+	/* update the TC's shaper */
+	tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
+	ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
+	if (ret) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "failed to update TC peak rate";
+	}
+
+	return ret;
+}
+
+static int
+hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
+			   uint32_t node_id,
+			   uint32_t shaper_profile_id,
+			   struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
+	struct hns3_tm_shaper_profile *profile = NULL;
+	struct hns3_tm_node *tm_node;
+
+	if (error == NULL)
+		return -EINVAL;
+
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "device is resetting";
+		return -EBUSY;
+	}
+
+	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
+	if (tm_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	if (shaper_profile_id == tm_node->params.shaper_profile_id)
+		return 0;
+
+	if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
+		if (profile == NULL) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+			error->message = "profile ID does not exist";
+			return -EINVAL;
+		}
+	}
+
+	if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
+					  profile, error))
+		return -EINVAL;
+
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count--;
+	tm_node->shaper_profile = profile;
+	tm_node->params.shaper_profile_id = shaper_profile_id;
+	if (profile != NULL)
+		profile->reference_count++;
+
+	return 0;
+}
+
+static int
+hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
+				uint32_t node_id,
+				uint32_t shaper_profile_id,
+				struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_node_shaper_update(dev, node_id,
+					 shaper_profile_id, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static const struct rte_tm_ops hns3_tm_ops = {
+	.capabilities_get       = hns3_tm_capabilities_get,
+	.shaper_profile_add     = hns3_tm_shaper_profile_add,
+	.shaper_profile_delete  = hns3_tm_shaper_profile_del,
+	.node_add               = hns3_tm_node_add,
+	.node_delete            = hns3_tm_node_delete,
+	.node_type_get          = hns3_tm_node_type_get,
+	.level_capabilities_get = hns3_tm_level_capabilities_get,
+	.node_capabilities_get  = hns3_tm_node_capabilities_get,
+	.hierarchy_commit       = hns3_tm_hierarchy_commit_wrap,
+	.node_shaper_update     = hns3_tm_node_shaper_update_wrap,
+};
+
+int
+hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+		void *arg)
+{
+	if (arg == NULL)
+		return -EINVAL;
+
+	*(const void **)arg = &hns3_tm_ops;
+
+	return 0;
+}
+
+void
+hns3_tm_dev_start_proc(struct hns3_hw *hw)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+
+	if (pf->tm_conf.root && !pf->tm_conf.committed)
+		hns3_warn(hw,
+		    "please call hierarchy_commit() before starting the port.");
+}
+
+/*
+ * Clear the tm_conf committed flag when the device stops so that the user
+ * can modify the TM configuration (e.g. add or delete a node).
+ *
+ * If the user then doesn't call hierarchy commit before the next device
+ * start, the port/TC shaper rates would silently stay at the previously
+ * committed values. To avoid that, also restore the default port/TC shaper
+ * rates when the device stops.
+ */
+void
+hns3_tm_dev_stop_proc(struct hns3_hw *hw)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+	struct hns3_tm_node *tm_node;
+	uint8_t tc_no;
+
+	if (!pf->tm_conf.committed)
+		return;
+
+	tm_node = pf->tm_conf.root;
+	if (tm_node != NULL && tm_node->shaper_profile)
+		(void)hns3_tm_config_port_rate(hw, NULL);
+
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (tm_node->shaper_profile == NULL)
+			continue;
+		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
+		(void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
+	}
+
+	pf->tm_conf.committed = false;
+}
+
+int
+hns3_tm_conf_update(struct hns3_hw *hw)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	struct rte_tm_error error;
+
+	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
+		return 0;
+
+	memset(&error, 0, sizeof(struct rte_tm_error));
+	return hns3_tm_hierarchy_do_commit(hw, &error);
+}
diff --git a/drivers/net/hns3/hns3_tm.h b/drivers/net/hns3/hns3_tm.h
new file mode 100644
index 0000000..d8de3e4
--- /dev/null
+++ b/drivers/net/hns3/hns3_tm.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020-2020 Hisilicon Limited.
+ */
+
+#ifndef _HNS3_TM_H_
+#define _HNS3_TM_H_
+
+#include <stdint.h>
+#include <rte_tailq.h>
+#include <rte_tm_driver.h>
+
+enum hns3_tm_node_type {
+	HNS3_TM_NODE_TYPE_PORT,
+	HNS3_TM_NODE_TYPE_TC,
+	HNS3_TM_NODE_TYPE_QUEUE,
+	HNS3_TM_NODE_TYPE_MAX,
+};
+
+enum hns3_tm_node_level {
+	HNS3_TM_NODE_LEVEL_PORT,
+	HNS3_TM_NODE_LEVEL_TC,
+	HNS3_TM_NODE_LEVEL_QUEUE,
+	HNS3_TM_NODE_LEVEL_MAX,
+};
+
+struct hns3_tm_shaper_profile {
+	TAILQ_ENTRY(hns3_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(hns3_shaper_profile_list, hns3_tm_shaper_profile);
+
+struct hns3_tm_node {
+	TAILQ_ENTRY(hns3_tm_node) node;
+	uint32_t id;
+	uint32_t reference_count;
+	struct hns3_tm_node *parent;
+	struct hns3_tm_shaper_profile *shaper_profile;
+	struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(hns3_tm_node_list, hns3_tm_node);
+
+struct hns3_tm_conf {
+	uint32_t nb_leaf_nodes_max; /* maximum number of leaf nodes */
+	uint32_t nb_nodes_max; /* maximum number of nodes */
+	uint32_t nb_shaper_profile_max; /* maximum number of shaper profiles */
+
+	struct hns3_shaper_profile_list shaper_profile_list;
+	uint32_t nb_shaper_profile; /* number of shaper profiles */
+
+	struct hns3_tm_node *root;
+	struct hns3_tm_node_list tc_list;
+	struct hns3_tm_node_list queue_list;
+	uint32_t nb_tc_node; /* number of added TC nodes */
+	uint32_t nb_queue_node; /* number of added queue nodes */
+
+	/*
+	 * This flag indicates whether the application may still change the
+	 * TM node configuration.
+	 * When it is true, the configuration has been applied to hardware
+	 * and the application must not add or delete TM nodes.
+	 * When starting the port, the application should call the
+	 * hierarchy_commit API, which sets this flag to true. Stopping the
+	 * port sets it back to false.
+	 */
+	bool committed;
+};
+
+/*
+ * This helper calculates a node's TC number. The caller must make sure the
+ * node id is within the TC node id range.
+ *
+ * The caller can use the rte_eth_dev_info_get API to get the port's
+ * max_tx_queues. Node ids are assigned according to the following rules:
+ *     [0, max_tx_queues-1]: the Tx queues' node ids
+ *     max_tx_queues + 0   : TC0's node id
+ *     max_tx_queues + 1   : TC1's node id
+ *     ...
+ *     max_tx_queues + 7   : TC7's node id
+ *     max_tx_queues + 8   : the port's node id
+ */
+static inline uint8_t
+hns3_tm_calc_node_tc_no(struct hns3_tm_conf *conf, uint32_t node_id)
+{
+	if (node_id >= conf->nb_leaf_nodes_max &&
+	    node_id < conf->nb_nodes_max - 1)
+		return node_id - conf->nb_leaf_nodes_max;
+	else
+		return 0;
+}
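+
+/*
+ * Illustrative example: with nb_leaf_nodes_max = 64 (i.e. 64 Tx queues),
+ * node id 66 is in the TC node range and maps to TC number 66 - 64 = 2.
+ */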
+
+void hns3_tm_conf_init(struct rte_eth_dev *dev);
+void hns3_tm_conf_uninit(struct rte_eth_dev *dev);
+int hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
+void hns3_tm_dev_start_proc(struct hns3_hw *hw);
+void hns3_tm_dev_stop_proc(struct hns3_hw *hw);
+int hns3_tm_conf_update(struct hns3_hw *hw);
+
+#endif /* _HNS3_TM_H_ */
diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build
index 45cee34..40f59b2 100644
--- a/drivers/net/hns3/meson.build
+++ b/drivers/net/hns3/meson.build
@@ -25,7 +25,8 @@ sources = files('hns3_cmd.c',
 	'hns3_rss.c',
 	'hns3_rxtx.c',
 	'hns3_stats.c',
-	'hns3_mp.c')
+	'hns3_mp.c',
+	'hns3_tm.c')
 
 deps += ['hash']
 
-- 
2.7.4



Thread overview: 12+ messages
2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 1/8] net/hns3: use C11 atomics builtins for resetting Lijun Ou
2021-01-14 13:33 ` Lijun Ou [this message]
2021-01-19  0:49   ` [dpdk-dev] [PATCH 2/8] net/hns3: support RTE TM get ops function Ferruh Yigit
2021-01-19  8:06     ` oulijun
2021-01-14 13:33 ` [dpdk-dev] [PATCH 3/8] net/hns3: fix VF query link status in dev init Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 4/8] net/hns3: use new opcode for clearing hardware resource Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 5/8] net/hns3: fix register length when dump registers Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 6/8] net/hns3: fix data overwriting during register dump Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 7/8] net/hns3: fix dump register out of range Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 8/8] net/hns3: remove unused assignment for RSS key Lijun Ou
2021-01-19  0:49 ` [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Ferruh Yigit
