DPDK patches and discussions
* [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3
@ 2021-01-14 13:33 Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 1/8] net/hns3: use C11 atomics builtins for resetting Lijun Ou
                   ` (8 more replies)
  0 siblings, 9 replies; 12+ messages in thread
From: Lijun Ou @ 2021-01-14 13:33 UTC
  To: ferruh.yigit; +Cc: dev

This series adds TM (traffic management) feature support and fixes
some bugs in the hns3 PMD driver. The TM code needs to access the
resetting member of the hns3_reset_data structure, which previously
used the legacy rte_atomicNN_xxx ops, so the series starts with a
preparatory patch that converts that member to C11 atomic builtins.
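
For reference, the conversion in patch 1 follows the pattern sketched
below (a minimal sketch with illustrative names, not code copied from
the driver; the relaxed ordering mirrors what the patch uses for this
flag):

#include <stdbool.h>
#include <stdint.h>

struct reset_state {
	uint16_t resetting;          /* was: rte_atomic16_t resetting */
};

static inline bool
reset_in_progress(struct reset_state *st)
{
	/* was: rte_atomic16_read(&st->resetting) != 0 */
	return __atomic_load_n(&st->resetting, __ATOMIC_RELAXED) != 0;
}

static inline void
reset_mark(struct reset_state *st, uint16_t on)
{
	/* was: rte_atomic16_set() / rte_atomic16_clear() */
	__atomic_store_n(&st->resetting, on, __ATOMIC_RELAXED);
}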

Chengchang Tang (3):
  net/hns3: fix register length when dump registers
  net/hns3: fix data overwriting during register dump
  net/hns3: fix dump register out of range

Chengwen Feng (2):
  net/hns3: support RTE TM get ops function
  net/hns3: fix VF query link status in dev init

Hongbo Zheng (1):
  net/hns3: use new opcode for clearing hardware resource

Lijun Ou (2):
  net/hns3: use C11 atomics builtins for resetting
  net/hns3: remove unused assignment for RSS key

 drivers/net/hns3/hns3_cmd.h       |    2 +-
 drivers/net/hns3/hns3_dcb.c       |  221 ++++---
 drivers/net/hns3/hns3_dcb.h       |    3 +
 drivers/net/hns3/hns3_ethdev.c    |   34 +-
 drivers/net/hns3/hns3_ethdev.h    |   16 +-
 drivers/net/hns3/hns3_ethdev_vf.c |   21 +-
 drivers/net/hns3/hns3_intr.c      |    8 +-
 drivers/net/hns3/hns3_regs.c      |   82 ++-
 drivers/net/hns3/hns3_rxtx.c      |    2 +-
 drivers/net/hns3/hns3_tm.c        | 1291 +++++++++++++++++++++++++++++++++++++
 drivers/net/hns3/hns3_tm.h        |  103 +++
 drivers/net/hns3/meson.build      |    3 +-
 12 files changed, 1623 insertions(+), 163 deletions(-)
 create mode 100644 drivers/net/hns3/hns3_tm.c
 create mode 100644 drivers/net/hns3/hns3_tm.h

-- 
2.7.4



* [dpdk-dev] [PATCH 1/8] net/hns3: use C11 atomics builtins for resetting
  2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
@ 2021-01-14 13:33 ` Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 2/8] net/hns3: support RTE TM get ops function Lijun Ou
                   ` (7 subsequent siblings)
  8 siblings, 0 replies; 12+ messages in thread
From: Lijun Ou @ 2021-01-14 13:33 UTC
  To: ferruh.yigit; +Cc: dev

Use C11 atomic builtins with explicit memory ordering instead of
the legacy rte_atomic ops for the resetting member of the
hns3_reset_data structure.

Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
 drivers/net/hns3/hns3_dcb.c       |  5 +++--
 drivers/net/hns3/hns3_ethdev.c    |  8 ++++----
 drivers/net/hns3/hns3_ethdev.h    |  2 +-
 drivers/net/hns3/hns3_ethdev_vf.c | 12 ++++++------
 drivers/net/hns3/hns3_intr.c      |  8 ++++----
 drivers/net/hns3/hns3_rxtx.c      |  2 +-
 6 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index fb50179..b32d5af 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -633,7 +633,7 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
 	 * and configured directly to the hardware in the RESET_STAGE_RESTORE
 	 * stage of the reset process.
 	 */
-	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
 		for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
 			rss_cfg->rss_indirection_tbl[i] =
 							i % hw->alloc_rss_size;
@@ -1562,7 +1562,8 @@ hns3_dcb_configure(struct hns3_adapter *hns)
 	int ret;
 
 	hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
-	if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
+	if (map_changed ||
+	    __atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
 		ret = hns3_dcb_info_update(hns, num_tc);
 		if (ret) {
 			hns3_err(hw, "dcb info update failed: %d", ret);
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 90544fe..888338a 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -1017,7 +1017,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * ensure that the hardware configuration remains unchanged before and
 	 * after reset.
 	 */
-	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
 		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
 		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
 	}
@@ -1041,7 +1041,7 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * we will restore configurations to hardware in hns3_restore_vlan_table
 	 * and hns3_restore_vlan_conf later.
 	 */
-	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
 		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
 		if (ret) {
 			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -4872,7 +4872,7 @@ hns3_dev_start(struct rte_eth_dev *dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	if (rte_atomic16_read(&hw->reset.resetting))
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
@@ -5018,7 +5018,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 	rte_delay_ms(hw->tqps_num);
 
 	rte_spinlock_lock(&hw->lock);
-	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
 		hns3_stop_tqps(hw);
 		hns3_do_stop(hns);
 		hns3_unmap_rx_interrupt(dev);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 31f78a1..0d86683 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -350,7 +350,7 @@ struct hns3_reset_data {
 	enum hns3_reset_stage stage;
 	rte_atomic16_t schedule;
 	/* Reset flag, covering the entire reset process */
-	rte_atomic16_t resetting;
+	uint16_t resetting;
 	/* Used to disable sending cmds during reset */
 	rte_atomic16_t disable_cmd;
 	/* The reset level being processed */
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index f09cabc..3809824 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -898,7 +898,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	 * MTU value issued by hns3 VF PMD driver must be less than or equal to
 	 * PF's MTU.
 	 */
-	if (rte_atomic16_read(&hw->reset.resetting)) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
 		hns3_err(hw, "Failed to set mtu during resetting");
 		return -EIO;
 	}
@@ -1438,7 +1438,7 @@ hns3vf_request_link_info(struct hns3_hw *hw)
 	uint8_t resp_msg;
 	int ret;
 
-	if (rte_atomic16_read(&hw->reset.resetting))
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
 		return;
 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
 				&resp_msg, sizeof(resp_msg));
@@ -1471,7 +1471,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (rte_atomic16_read(&hw->reset.resetting)) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
 		hns3_err(hw,
 			 "vf set vlan id failed during resetting, vlan_id =%u",
 			 vlan_id);
@@ -1510,7 +1510,7 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	unsigned int tmp_mask;
 	int ret = 0;
 
-	if (rte_atomic16_read(&hw->reset.resetting)) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
 		hns3_err(hw, "vf set vlan offload failed during resetting, "
 			     "mask = 0x%x", mask);
 		return -EIO;
@@ -1957,7 +1957,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
 	rte_delay_ms(hw->tqps_num);
 
 	rte_spinlock_lock(&hw->lock);
-	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
 		hns3_stop_tqps(hw);
 		hns3vf_do_stop(hns);
 		hns3vf_unmap_rx_interrupt(dev);
@@ -2188,7 +2188,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	if (rte_atomic16_read(&hw->reset.resetting))
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 99c500d..51f19b4 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -1761,7 +1761,7 @@ hns3_reset_init(struct hns3_hw *hw)
 	hw->reset.stage = RESET_STAGE_NONE;
 	hw->reset.request = 0;
 	hw->reset.pending = 0;
-	rte_atomic16_init(&hw->reset.resetting);
+	hw->reset.resetting = 0;
 	rte_atomic16_init(&hw->reset.disable_cmd);
 	hw->reset.wait_data = rte_zmalloc("wait_data",
 					  sizeof(struct hns3_wait_data), 0);
@@ -2011,7 +2011,7 @@ hns3_reset_pre(struct hns3_adapter *hns)
 	int ret;
 
 	if (hw->reset.stage == RESET_STAGE_NONE) {
-		rte_atomic16_set(&hns->hw.reset.resetting, 1);
+		__atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
 		hw->reset.stage = RESET_STAGE_DOWN;
 		ret = hw->reset.ops->stop_service(hns);
 		gettimeofday(&tv, NULL);
@@ -2098,7 +2098,7 @@ hns3_reset_post(struct hns3_adapter *hns)
 		/* IMP will wait ready flag before reset */
 		hns3_notify_reset_ready(hw, false);
 		hns3_clear_reset_level(hw, &hw->reset.pending);
-		rte_atomic16_clear(&hns->hw.reset.resetting);
+		__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
 		hw->reset.attempts = 0;
 		hw->reset.stats.success_cnt++;
 		hw->reset.stage = RESET_STAGE_NONE;
@@ -2223,7 +2223,7 @@ hns3_reset_process(struct hns3_adapter *hns, enum hns3_reset_level new_level)
 			hw->reset.mbuf_deferred_free = false;
 		}
 		rte_spinlock_unlock(&hw->lock);
-		rte_atomic16_clear(&hns->hw.reset.resetting);
+		__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
 		hw->reset.stage = RESET_STAGE_NONE;
 		gettimeofday(&tv, NULL);
 		timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 88d3bab..d4608ac 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -3744,7 +3744,7 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 	eth_tx_prep_t prep = NULL;
 
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
-	    rte_atomic16_read(&hns->hw.reset.resetting) == 0) {
+	    __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
 		eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
 		eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
 		eth_dev->tx_pkt_prepare = prep;
-- 
2.7.4



* [dpdk-dev] [PATCH 2/8] net/hns3: support RTE TM get ops function
  2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 1/8] net/hns3: use C11 atomics builtins for resetting Lijun Ou
@ 2021-01-14 13:33 ` Lijun Ou
  2021-01-19  0:49   ` Ferruh Yigit
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 3/8] net/hns3: fix VF query link status in dev init Lijun Ou
                   ` (6 subsequent siblings)
  8 siblings, 1 reply; 12+ messages in thread
From: Lijun Ou @ 2021-01-14 13:33 UTC
  To: ferruh.yigit; +Cc: dev

From: Chengwen Feng <fengchengwen@huawei.com>

This patch adds RTE TM ops support for the PF driver, which can be
used to:
1. configure the port's peak rate.
2. configure each TC's peak rate.
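
For illustration only (not part of this patch), an application could
drive these ops through the generic rte_tm API roughly as below. The
shaper profile id and port_node_id are hypothetical, and the node id
must match the layout this driver expects (queue nodes first, then TC
nodes, with the port node last):

#include <rte_tm.h>

/* Hypothetical sketch: cap the port peak rate at 10 Gbit/s. */
static int
cap_port_rate(uint16_t port_id, uint32_t port_node_id)
{
	struct rte_tm_shaper_params sp = {
		.peak = { .rate = 1250000000ULL, .size = 0 }, /* bytes/s */
	};
	struct rte_tm_node_params np = {
		.shaper_profile_id = 1,
		.nonleaf = { .n_sp_priorities = 1 },
	};
	struct rte_tm_error err;
	int ret;

	ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
	if (ret != 0)
		return ret;

	/* Root node: no parent; priority 0 and weight 1 are required. */
	ret = rte_tm_node_add(port_id, port_node_id, RTE_TM_NODE_ID_NULL,
			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret != 0)
		return ret;

	return rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);
}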

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
 drivers/net/hns3/hns3_dcb.c    |  216 ++++---
 drivers/net/hns3/hns3_dcb.h    |    3 +
 drivers/net/hns3/hns3_ethdev.c |   20 +-
 drivers/net/hns3/hns3_ethdev.h |   14 +
 drivers/net/hns3/hns3_tm.c     | 1291 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/hns3/hns3_tm.h     |  103 ++++
 drivers/net/hns3/meson.build   |    3 +-
 7 files changed, 1554 insertions(+), 96 deletions(-)
 create mode 100644 drivers/net/hns3/hns3_tm.c
 create mode 100644 drivers/net/hns3/hns3_tm.h

diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index b32d5af..5aa374c 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -76,16 +76,13 @@ hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
 		shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
 	} else if (ir_calc > ir) {
 		/* Increasing the denominator to select ir_s value */
-		do {
+		while (ir_calc >= ir && ir) {
 			ir_s_calc++;
 			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
-		} while (ir_calc > ir);
+		}
 
-		if (ir_calc == ir)
-			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
-		else
-			shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
-				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
+		shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
+				    (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
 	} else {
 		/*
 		 * Increasing the numerator to select ir_u value. ir_u_calc will
@@ -320,6 +317,10 @@ hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
 {
 	uint32_t shapping_para = 0;
 
+	/* An ir_b of zero means the IR is 0 Mbps, return zero shapping_para */
+	if (ir_b == 0)
+		return shapping_para;
+
 	hns3_dcb_set_field(shapping_para, IR_B, ir_b);
 	hns3_dcb_set_field(shapping_para, IR_U, ir_u);
 	hns3_dcb_set_field(shapping_para, IR_S, ir_s);
@@ -402,14 +403,57 @@ hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
 	return hns3_cmd_send(hw, &desc, 1);
 }
 
-static int
-hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
+int
+hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
 {
-	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct hns3_shaper_parameter shaper_parameter;
-	struct hns3_pf *pf = &hns->pf;
 	uint32_t ir_u, ir_b, ir_s;
 	uint32_t shaper_para;
+	int ret;
+
+	/* Calc shaper para */
+	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
+				    &shaper_parameter);
+	if (ret) {
+		hns3_err(hw, "calculate shaper parameter fail, ret = %d.",
+			 ret);
+		return ret;
+	}
+
+	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
+						 HNS3_SHAPER_BS_U_DEF,
+						 HNS3_SHAPER_BS_S_DEF);
+
+	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,
+				       shaper_para, rate);
+	if (ret) {
+		hns3_err(hw, "config PG CIR shaper parameter fail, ret = %d.",
+			 ret);
+		return ret;
+	}
+
+	ir_b = shaper_parameter.ir_b;
+	ir_u = shaper_parameter.ir_u;
+	ir_s = shaper_parameter.ir_s;
+	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
+						 HNS3_SHAPER_BS_U_DEF,
+						 HNS3_SHAPER_BS_S_DEF);
+
+	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,
+				       shaper_para, rate);
+	if (ret) {
+		hns3_err(hw, "config PG PIR shaper parameter fail, ret = %d.",
+			 ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
 	uint32_t rate;
 	uint8_t i;
 	int ret;
@@ -421,44 +465,9 @@ hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
 	/* Pg to pri */
 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
 		rate = hw->dcb_info.pg_info[i].bw_limit;
-
-		/* Calc shaper para */
-		ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
-					    &shaper_parameter);
-		if (ret) {
-			hns3_err(hw, "calculate shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
-
-		shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
-							 HNS3_SHAPER_BS_U_DEF,
-							 HNS3_SHAPER_BS_S_DEF);
-
-		ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
-					       shaper_para, rate);
-		if (ret) {
-			hns3_err(hw,
-				 "config PG CIR shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
-
-		ir_b = shaper_parameter.ir_b;
-		ir_u = shaper_parameter.ir_u;
-		ir_s = shaper_parameter.ir_s;
-		shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
-							 HNS3_SHAPER_BS_U_DEF,
-							 HNS3_SHAPER_BS_S_DEF);
-
-		ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
-					       shaper_para, rate);
-		if (ret) {
-			hns3_err(hw,
-				 "config PG PIR shaper parameter failed: %d",
-				 ret);
+		ret = hns3_pg_shaper_rate_cfg(hw, i, rate);
+		if (ret)
 			return ret;
-		}
 	}
 
 	return 0;
@@ -530,74 +539,75 @@ hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
 	return hns3_cmd_send(hw, &desc, 1);
 }
 
-static int
-hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
+int
+hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
 {
 	struct hns3_shaper_parameter shaper_parameter;
 	uint32_t ir_u, ir_b, ir_s;
 	uint32_t shaper_para;
-	uint32_t rate;
-	int ret, i;
+	int ret;
 
-	for (i = 0; i < hw->dcb_info.num_tc; i++) {
-		rate = hw->dcb_info.tc_info[i].bw_limit;
-		ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
-					    &shaper_parameter);
-		if (ret) {
-			hns3_err(hw, "calculate shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
+	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
+				    &shaper_parameter);
+	if (ret) {
+		hns3_err(hw, "calculate shaper parameter failed: %d.",
+			 ret);
+		return ret;
+	}
 
-		shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
-							 HNS3_SHAPER_BS_U_DEF,
-							 HNS3_SHAPER_BS_S_DEF);
+	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
+						 HNS3_SHAPER_BS_U_DEF,
+						 HNS3_SHAPER_BS_S_DEF);
 
-		ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
-						shaper_para, rate);
-		if (ret) {
-			hns3_err(hw,
-				 "config priority CIR shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
+	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,
+					shaper_para, rate);
+	if (ret) {
+		hns3_err(hw,
+			 "config priority CIR shaper parameter failed: %d.",
+			 ret);
+		return ret;
+	}
 
-		ir_b = shaper_parameter.ir_b;
-		ir_u = shaper_parameter.ir_u;
-		ir_s = shaper_parameter.ir_s;
-		shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
-							 HNS3_SHAPER_BS_U_DEF,
-							 HNS3_SHAPER_BS_S_DEF);
+	ir_b = shaper_parameter.ir_b;
+	ir_u = shaper_parameter.ir_u;
+	ir_s = shaper_parameter.ir_s;
+	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
+						 HNS3_SHAPER_BS_U_DEF,
+						 HNS3_SHAPER_BS_S_DEF);
 
-		ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
-						shaper_para, rate);
-		if (ret) {
-			hns3_err(hw,
-				 "config priority PIR shaper parameter failed: %d",
-				 ret);
-			return ret;
-		}
+	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,
+					shaper_para, rate);
+	if (ret) {
+		hns3_err(hw,
+			 "config priority PIR shaper parameter failed: %d.",
+			 ret);
+		return ret;
 	}
 
 	return 0;
 }
 
-
 static int
 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
 {
-	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
-	struct hns3_pf *pf = &hns->pf;
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	uint32_t rate;
+	uint8_t i;
 	int ret;
 
 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
 		return -EINVAL;
 
-	ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
-	if (ret)
-		hns3_err(hw, "config port shaper failed: %d", ret);
+	for (i = 0; i < hw->dcb_info.num_tc; i++) {
+		rate = hw->dcb_info.tc_info[i].bw_limit;
+		ret = hns3_pri_shaper_rate_cfg(hw, i, rate);
+		if (ret) {
+			hns3_err(hw, "config pri shaper failed: %d.", ret);
+			return ret;
+		}
+	}
 
-	return ret;
+	return 0;
 }
 
 static int
@@ -680,6 +690,26 @@ hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
 	return 0;
 }
 
+uint8_t
+hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no)
+{
+	struct hns3_tc_queue_info *tc_queue;
+	uint8_t i;
+
+	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
+		tc_queue = &hw->tc_queue[i];
+		if (!tc_queue->enable)
+			continue;
+
+		if (txq_no >= tc_queue->tqp_offset &&
+		    txq_no < tc_queue->tqp_offset + tc_queue->tqp_count)
+			return i;
+	}
+
+	/* Return TC0 by default. */
+	return 0;
+}
+
 int
 hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
 {
diff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h
index fee23d9..8248434 100644
--- a/drivers/net/hns3/hns3_dcb.h
+++ b/drivers/net/hns3/hns3_dcb.h
@@ -209,5 +209,8 @@ int hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
 
 int hns3_dcb_cfg_update(struct hns3_adapter *hns);
 int hns3_dcb_port_shaper_cfg(struct hns3_hw *hw);
+int hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate);
+int hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate);
+uint8_t hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no);
 
 #endif /* _HNS3_DCB_H_ */
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 888338a..b6308ee 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -5,7 +5,6 @@
 #include <rte_alarm.h>
 #include <rte_bus_pci.h>
 #include <rte_ethdev_pci.h>
-#include <rte_io.h>
 #include <rte_pci.h>
 
 #include "hns3_ethdev.h"
@@ -2494,7 +2493,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	return 0;
 }
 
-static int
+int
 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
@@ -4679,6 +4678,8 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_enable_intr;
 	}
 
+	hns3_tm_conf_init(eth_dev);
+
 	return 0;
 
 err_enable_intr:
@@ -4712,6 +4713,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	hns3_tm_conf_uninit(eth_dev);
 	hns3_enable_hw_error_intr(hns, false);
 	hns3_rss_uninit(hns);
 	(void)hns3_config_gro(hw, false);
@@ -4739,6 +4741,16 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
 	if (ret)
 		return ret;
 
+	/*
+	 * hns3_dcb_cfg_update may configure the TM module, so
+	 * hns3_tm_conf_update must be called after it.
+	 */
+	ret = hns3_tm_conf_update(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
+		return ret;
+	}
+
 	ret = hns3_init_queues(hns, reset_queue);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
@@ -4936,6 +4948,8 @@ hns3_dev_start(struct rte_eth_dev *dev)
 	 */
 	hns3_start_tqps(hw);
 
+	hns3_tm_dev_start_proc(hw);
+
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
 }
@@ -5019,6 +5033,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 
 	rte_spinlock_lock(&hw->lock);
 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+		hns3_tm_dev_stop_proc(hw);
 		hns3_stop_tqps(hw);
 		hns3_do_stop(hns);
 		hns3_unmap_rx_interrupt(dev);
@@ -6089,6 +6104,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.fec_get_capability     = hns3_fec_get_capability,
 	.fec_get                = hns3_fec_get,
 	.fec_set                = hns3_fec_set,
+	.tm_ops_get             = hns3_tm_ops_get,
 };
 
 static const struct hns3_reset_ops hns3_reset_ops = {
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 0d86683..0d17170 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -7,12 +7,16 @@
 
 #include <sys/time.h>
 #include <rte_ethdev_driver.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_spinlock.h>
 
 #include "hns3_cmd.h"
 #include "hns3_mbx.h"
 #include "hns3_rss.h"
 #include "hns3_fdir.h"
 #include "hns3_stats.h"
+#include "hns3_tm.h"
 
 /* Vendor ID */
 #define PCI_VENDOR_ID_HUAWEI			0x19e5
@@ -727,6 +731,8 @@ struct hns3_pf {
 
 	struct hns3_fdir_info fdir; /* flow director info */
 	LIST_HEAD(counters, hns3_flow_counter) flow_counters;
+
+	struct hns3_tm_conf tm_conf;
 };
 
 struct hns3_vf {
@@ -796,6 +802,12 @@ struct hns3_adapter {
 #define HNS3_DEV_HW_TO_ADAPTER(hw) \
 	container_of(hw, struct hns3_adapter, hw)
 
+static inline struct hns3_pf *HNS3_DEV_HW_TO_PF(struct hns3_hw *hw)
+{
+	struct hns3_adapter *adapter = HNS3_DEV_HW_TO_ADAPTER(hw);
+	return &adapter->pf;
+}
+
 #define hns3_set_field(origin, mask, shift, val) \
 	do { \
 		(origin) &= (~(mask)); \
@@ -937,6 +949,8 @@ bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
 void hns3_update_link_status(struct hns3_hw *hw);
 void hns3_ether_format_addr(char *buf, uint16_t size,
 			const struct rte_ether_addr *ether_addr);
+int hns3_dev_infos_get(struct rte_eth_dev *eth_dev,
+		       struct rte_eth_dev_info *info);
 
 static inline bool
 is_reset_pending(struct hns3_adapter *hns)
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
new file mode 100644
index 0000000..d1639d4
--- /dev/null
+++ b/drivers/net/hns3/hns3_tm.c
@@ -0,0 +1,1291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020-2020 Hisilicon Limited.
+ */
+
+#include <rte_malloc.h>
+
+#include "hns3_ethdev.h"
+#include "hns3_dcb.h"
+#include "hns3_logs.h"
+#include "hns3_tm.h"
+
+static inline uint32_t
+hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
+{
+	/*
+	 * This API is called at the PCI device probe stage, where
+	 * rte_eth_dev_info_get cannot be used to get max_tx_queues (because
+	 * rte_eth_devices has not been set up yet), so hns3_dev_infos_get
+	 * is called directly.
+	 */
+	struct rte_eth_dev_info dev_info;
+
+	memset(&dev_info, 0, sizeof(dev_info));
+	(void)hns3_dev_infos_get(dev, &dev_info);
+	return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
+}
+
+void
+hns3_tm_conf_init(struct rte_eth_dev *dev)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
+	pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
+	pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;
+
+	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+	pf->tm_conf.nb_shaper_profile = 0;
+
+	pf->tm_conf.root = NULL;
+	TAILQ_INIT(&pf->tm_conf.tc_list);
+	TAILQ_INIT(&pf->tm_conf.queue_list);
+	pf->tm_conf.nb_tc_node = 0;
+	pf->tm_conf.nb_queue_node = 0;
+
+	pf->tm_conf.committed = false;
+}
+
+void
+hns3_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_shaper_profile *shaper_profile;
+	struct hns3_tm_node *tm_node;
+
+	if (pf->tm_conf.nb_queue_node > 0) {
+		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
+			TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+			rte_free(tm_node);
+		}
+		pf->tm_conf.nb_queue_node = 0;
+	}
+
+	if (pf->tm_conf.nb_tc_node > 0) {
+		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
+			TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+			rte_free(tm_node);
+		}
+		pf->tm_conf.nb_tc_node = 0;
+	}
+
+	if (pf->tm_conf.root != NULL) {
+		rte_free(pf->tm_conf.root);
+		pf->tm_conf.root = NULL;
+	}
+
+	if (pf->tm_conf.nb_shaper_profile > 0) {
+		while ((shaper_profile =
+		       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
+			TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
+				     shaper_profile, node);
+			rte_free(shaper_profile);
+		}
+		pf->tm_conf.nb_shaper_profile = 0;
+	}
+
+	pf->tm_conf.nb_leaf_nodes_max = 0;
+	pf->tm_conf.nb_nodes_max = 0;
+	pf->tm_conf.nb_shaper_profile_max = 0;
+}
+
+static inline uint64_t
+hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
+{
+#define FIRMWARE_TO_TM_RATE_SCALE	125000
+	/* tm rate unit is Bps, firmware rate is Mbps */
+	return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
+}
+
+static inline uint32_t
+hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
+{
+#define TM_TO_FIRMWARE_RATE_SCALE	125000
+	/* tm rate unit is Bps, firmware rate is Mbps */
+	return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
+}
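+
+/*
+ * Worked example (illustrative, not from the original patch): a firmware
+ * rate of 200 Mbps maps to 200 * 125000 = 25000000 Bps at the TM level,
+ * and 25000000 Bps maps back to 200 Mbps, so the two helpers are inverses
+ * for whole-Mbps rates (1 Mbps = 1000000 / 8 = 125000 bytes/s).
+ */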
+
+static int
+hns3_tm_capabilities_get(struct rte_eth_dev *dev,
+			 struct rte_tm_capabilities *cap,
+			 struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	if (cap == NULL || error == NULL)
+		return -EINVAL;
+
+	error->type = RTE_TM_ERROR_TYPE_NONE;
+
+	memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+	cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
+	cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
+	cap->non_leaf_nodes_identical = 1;
+	cap->leaf_nodes_identical = 1;
+	cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
+	cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
+	cap->shaper_private_dual_rate_n_max = 0;
+	cap->shaper_private_rate_min = 0;
+	cap->shaper_private_rate_max =
+		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
+	cap->shaper_shared_n_max = 0;
+	cap->shaper_shared_n_nodes_per_shaper_max = 0;
+	cap->shaper_shared_n_shapers_per_node_max = 0;
+	cap->shaper_shared_dual_rate_n_max = 0;
+	cap->shaper_shared_rate_min = 0;
+	cap->shaper_shared_rate_max = 0;
+
+	cap->sched_n_children_max = max_tx_queues;
+	cap->sched_sp_n_priorities_max = 1;
+	cap->sched_wfq_n_children_per_group_max = 0;
+	cap->sched_wfq_n_groups_max = 0;
+	cap->sched_wfq_weight_max = 1;
+
+	cap->cman_head_drop_supported = 0;
+	cap->dynamic_update_mask = 0;
+	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+	cap->cman_wred_context_n_max = 0;
+	cap->cman_wred_context_private_n_max = 0;
+	cap->cman_wred_context_shared_n_max = 0;
+	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+	cap->stats_mask = 0;
+
+	return 0;
+}
+
+static struct hns3_tm_shaper_profile *
+hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
+			      uint32_t shaper_profile_id)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_shaper_profile_list *shaper_profile_list =
+		&pf->tm_conf.shaper_profile_list;
+	struct hns3_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
+static int
+hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
+				   struct rte_tm_shaper_params *profile,
+				   struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (profile->committed.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+		error->message = "committed rate not supported";
+		return -EINVAL;
+	}
+
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+
+	if (profile->peak.rate >
+	    hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+		error->message = "peak rate too large";
+		return -EINVAL;
+	}
+
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	if (profile->packet_mode) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
+		error->message = "packet mode not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id,
+			   struct rte_tm_shaper_params *profile,
+			   struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (profile == NULL || error == NULL)
+		return -EINVAL;
+
+	if (pf->tm_conf.nb_shaper_profile >=
+	    pf->tm_conf.nb_shaper_profile_max) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "too many profiles";
+		return -EINVAL;
+	}
+
+	ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID already exists";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
+				     sizeof(struct hns3_tm_shaper_profile),
+				     0);
+	if (shaper_profile == NULL)
+		return -ENOMEM;
+
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	memcpy(&shaper_profile->profile, profile,
+	       sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+	pf->tm_conf.nb_shaper_profile++;
+
+	return 0;
+}
+
+static int
+hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id,
+			   struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_shaper_profile *shaper_profile;
+
+	if (error == NULL)
+		return -EINVAL;
+
+	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
+	if (shaper_profile == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID does not exist";
+		return -EINVAL;
+	}
+
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+	pf->tm_conf.nb_shaper_profile--;
+
+	return 0;
+}
+
+static struct hns3_tm_node *
+hns3_tm_node_search(struct rte_eth_dev *dev,
+		    uint32_t node_id,
+		    enum hns3_tm_node_type *node_type)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+	struct hns3_tm_node *tm_node;
+
+	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
+		*node_type = HNS3_TM_NODE_TYPE_PORT;
+		return pf->tm_conf.root;
+	}
+
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = HNS3_TM_NODE_TYPE_TC;
+			return tm_node;
+		}
+	}
+
+	TAILQ_FOREACH(tm_node, queue_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = HNS3_TM_NODE_TYPE_QUEUE;
+			return tm_node;
+		}
+	}
+
+	return NULL;
+}
+
+static int
+hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
+				 struct rte_tm_node_params *params,
+				 struct rte_tm_error *error)
+{
+	struct hns3_tm_shaper_profile *shaper_profile;
+
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = hns3_tm_shaper_profile_search(dev,
+				 params->shaper_profile_id);
+		if (shaper_profile == NULL) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile does not exist";
+			return -EINVAL;
+		}
+	}
+
+	if (params->nonleaf.wfq_weight_mode) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+		error->message = "WFQ not supported";
+		return -EINVAL;
+	}
+
+	if (params->nonleaf.n_sp_priorities != 1) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+		error->message = "SP priority not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
+			      struct rte_tm_node_params *params,
+			      struct rte_tm_error *error)
+{
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+		error->message = "shaper not supported";
+		return -EINVAL;
+	}
+
+	if (params->leaf.cman) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+		error->message = "congestion management not supported";
+		return -EINVAL;
+	}
+
+	if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+
+	if (params->leaf.wred.shared_wred_context_id) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+
+	if (params->leaf.wred.n_shared_wred_contexts) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
+			 uint32_t priority, uint32_t weight,
+			 struct rte_tm_node_params *params,
+			 struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	if (hns3_tm_node_search(dev, node_id, &node_type)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id already used";
+		return -EINVAL;
+	}
+
+	if (priority) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+		error->message = "priority should be 0";
+		return -EINVAL;
+	}
+
+	if (weight != 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+		error->message = "weight must be 1";
+		return -EINVAL;
+	}
+
+	if (params->shared_shaper_id) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+	if (params->n_shared_shapers) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+
+	if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
+		return hns3_tm_nonleaf_node_param_check(dev, params, error);
+	else
+		return hns3_tm_leaf_node_param_check(dev, params, error);
+}
+
+static int
+hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+		      uint32_t level_id, struct rte_tm_node_params *params,
+		      struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_node *tm_node;
+
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+	    level_id != HNS3_TM_NODE_LEVEL_PORT) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "wrong level";
+		return -EINVAL;
+	}
+
+	if (node_id != pf->tm_conf.nb_nodes_max - 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid port node ID";
+		return -EINVAL;
+	}
+
+	if (pf->tm_conf.root) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "already have a root";
+		return -EINVAL;
+	}
+
+	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
+	if (tm_node == NULL)
+		return -ENOMEM;
+
+	tm_node->id = node_id;
+	tm_node->reference_count = 0;
+	tm_node->parent = NULL;
+	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
+				  params->shaper_profile_id);
+	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
+	pf->tm_conf.root = tm_node;
+
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count++;
+
+	return 0;
+}
+
+static int
+hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+		    uint32_t level_id, struct hns3_tm_node *parent_node,
+		    struct rte_tm_node_params *params,
+		    struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_node *tm_node;
+
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+	    level_id != HNS3_TM_NODE_LEVEL_TC) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "wrong level";
+		return -EINVAL;
+	}
+
+	if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
+	    node_id < pf->tm_conf.nb_leaf_nodes_max ||
+	    hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid tc node ID";
+		return -EINVAL;
+	}
+
+	if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "too many TCs";
+		return -EINVAL;
+	}
+
+	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
+	if (tm_node == NULL)
+		return -ENOMEM;
+
+	tm_node->id = node_id;
+	tm_node->reference_count = 0;
+	tm_node->parent = parent_node;
+	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
+					params->shaper_profile_id);
+	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
+	TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
+	pf->tm_conf.nb_tc_node++;
+	tm_node->parent->reference_count++;
+
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count++;
+
+	return 0;
+}
+
+static int
+hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+		       uint32_t level_id, struct hns3_tm_node *parent_node,
+		       struct rte_tm_node_params *params,
+		       struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct hns3_tm_node *tm_node;
+
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+	    level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "wrong level";
+		return -EINVAL;
+	}
+
+	/* note: dev->data->nb_tx_queues <= max_tx_queues */
+	if (node_id >= dev->data->nb_tx_queues) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid queue node ID";
+		return -EINVAL;
+	}
+
+	if (hns3_txq_mapped_tc_get(hw, node_id) !=
+	    hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "queue's TC does not match parent's TC";
+		return -EINVAL;
+	}
+
+	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
+	if (tm_node == NULL)
+		return -ENOMEM;
+
+	tm_node->id = node_id;
+	tm_node->reference_count = 0;
+	tm_node->parent = parent_node;
+	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
+	TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
+	pf->tm_conf.nb_queue_node++;
+	tm_node->parent->reference_count++;
+
+	return 0;
+}
+
+static int
+hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+		 uint32_t parent_node_id, uint32_t priority,
+		 uint32_t weight, uint32_t level_id,
+		 struct rte_tm_node_params *params,
+		 struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
+	struct hns3_tm_node *parent_node;
+	int ret;
+
+	if (params == NULL || error == NULL)
+		return -EINVAL;
+
+	if (pf->tm_conf.committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
+				       params, error);
+	if (ret)
+		return ret;
+
+	/* A null parent means this is the root (port) node. */
+	if (parent_node_id == RTE_TM_NODE_ID_NULL)
+		return hns3_tm_port_node_add(dev, node_id, level_id,
+					     params, error);
+
+	parent_node = hns3_tm_node_search(dev, parent_node_id,
+					  &parent_node_type);
+	if (parent_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent does not exist";
+		return -EINVAL;
+	}
+
+	if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
+	    parent_node_type != HNS3_TM_NODE_TYPE_TC) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent is not port or TC";
+		return -EINVAL;
+	}
+
+	if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
+		return hns3_tm_tc_node_add(dev, node_id, level_id,
+					   parent_node, params, error);
+	else
+		return hns3_tm_queue_node_add(dev, node_id, level_id,
+					      parent_node, params, error);
+}
+
+static void
+hns3_tm_node_do_delete(struct hns3_pf *pf,
+		       enum hns3_tm_node_type node_type,
+		       struct hns3_tm_node *tm_node)
+{
+	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
+		if (tm_node->shaper_profile)
+			tm_node->shaper_profile->reference_count--;
+		rte_free(tm_node);
+		pf->tm_conf.root = NULL;
+		return;
+	}
+
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count--;
+	tm_node->parent->reference_count--;
+	if (node_type == HNS3_TM_NODE_TYPE_TC) {
+		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+		pf->tm_conf.nb_tc_node--;
+	} else {
+		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+		pf->tm_conf.nb_queue_node--;
+	}
+	rte_free(tm_node);
+}
+
+static int
+hns3_tm_node_delete(struct rte_eth_dev *dev,
+		    uint32_t node_id,
+		    struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
+	struct hns3_tm_node *tm_node;
+
+	if (error == NULL)
+		return -EINVAL;
+
+	if (pf->tm_conf.committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
+	if (tm_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	if (tm_node->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "cannot delete a node which has children";
+		return -EINVAL;
+	}
+
+	hns3_tm_node_do_delete(pf, node_type, tm_node);
+
+	return 0;
+}
+
+static int
+hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+		      int *is_leaf, struct rte_tm_error *error)
+{
+	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
+	struct hns3_tm_node *tm_node;
+
+	if (is_leaf == NULL || error == NULL)
+		return -EINVAL;
+
+	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
+	if (tm_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
+		*is_leaf = true;
+	else
+		*is_leaf = false;
+
+	return 0;
+}
+
+static void
+hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
+				       uint32_t level_id,
+				       struct rte_tm_level_capabilities *cap)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
+		cap->n_nodes_max = 1;
+		cap->n_nodes_nonleaf_max = 1;
+		cap->n_nodes_leaf_max = 0;
+	} else {
+		cap->n_nodes_max = HNS3_MAX_TC_NUM;
+		cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
+		cap->n_nodes_leaf_max = 0;
+	}
+
+	cap->non_leaf_nodes_identical = 1;
+	cap->leaf_nodes_identical = 1;
+
+	cap->nonleaf.shaper_private_supported = true;
+	cap->nonleaf.shaper_private_dual_rate_supported = false;
+	cap->nonleaf.shaper_private_rate_min = 0;
+	cap->nonleaf.shaper_private_rate_max =
+		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
+	cap->nonleaf.shaper_shared_n_max = 0;
+	if (level_id == HNS3_TM_NODE_LEVEL_PORT)
+		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
+	else
+		cap->nonleaf.sched_n_children_max = max_tx_queues;
+	cap->nonleaf.sched_sp_n_priorities_max = 1;
+	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+	cap->nonleaf.sched_wfq_n_groups_max = 0;
+	cap->nonleaf.sched_wfq_weight_max = 1;
+	cap->nonleaf.stats_mask = 0;
+}
+
+static void
+hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
+				    struct rte_tm_level_capabilities *cap)
+{
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	cap->n_nodes_max = max_tx_queues;
+	cap->n_nodes_nonleaf_max = 0;
+	cap->n_nodes_leaf_max = max_tx_queues;
+
+	cap->non_leaf_nodes_identical = 1;
+	cap->leaf_nodes_identical = 1;
+
+	cap->leaf.shaper_private_supported = false;
+	cap->leaf.shaper_private_dual_rate_supported = false;
+	cap->leaf.shaper_private_rate_min = 0;
+	cap->leaf.shaper_private_rate_max = 0;
+	cap->leaf.shaper_shared_n_max = 0;
+	cap->leaf.cman_head_drop_supported = false;
+	cap->leaf.cman_wred_context_private_supported = false;
+	cap->leaf.cman_wred_context_shared_n_max = 0;
+	cap->leaf.stats_mask = 0;
+}
+
+static int
+hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
+			       uint32_t level_id,
+			       struct rte_tm_level_capabilities *cap,
+			       struct rte_tm_error *error)
+{
+	if (cap == NULL || error == NULL)
+		return -EINVAL;
+
+	if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "too deep level";
+		return -EINVAL;
+	}
+
+	memset(cap, 0, sizeof(struct rte_tm_level_capabilities));
+
+	if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
+		hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
+	else
+		hns3_tm_leaf_level_capabilities_get(dev, cap);
+
+	return 0;
+}
+
+static void
+hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
+				      enum hns3_tm_node_type node_type,
+				      struct rte_tm_node_capabilities *cap)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
+
+	cap->shaper_private_supported = true;
+	cap->shaper_private_dual_rate_supported = false;
+	cap->shaper_private_rate_min = 0;
+	cap->shaper_private_rate_max =
+		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
+	cap->shaper_shared_n_max = 0;
+
+	if (node_type == HNS3_TM_NODE_TYPE_PORT)
+		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
+	else
+		cap->nonleaf.sched_n_children_max = max_tx_queues;
+	cap->nonleaf.sched_sp_n_priorities_max = 1;
+	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+	cap->nonleaf.sched_wfq_n_groups_max = 0;
+	cap->nonleaf.sched_wfq_weight_max = 1;
+
+	cap->stats_mask = 0;
+}
+
+static void
+hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+				   struct rte_tm_node_capabilities *cap)
+{
+	cap->shaper_private_supported = false;
+	cap->shaper_private_dual_rate_supported = false;
+	cap->shaper_private_rate_min = 0;
+	cap->shaper_private_rate_max = 0;
+	cap->shaper_shared_n_max = 0;
+
+	cap->leaf.cman_head_drop_supported = false;
+	cap->leaf.cman_wred_context_private_supported = false;
+	cap->leaf.cman_wred_context_shared_n_max = 0;
+
+	cap->stats_mask = 0;
+}
+
+static int
+hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
+			      uint32_t node_id,
+			      struct rte_tm_node_capabilities *cap,
+			      struct rte_tm_error *error)
+{
+	enum hns3_tm_node_type node_type;
+	struct hns3_tm_node *tm_node;
+
+	if (cap == NULL || error == NULL)
+		return -EINVAL;
+
+	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
+	if (tm_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	memset(cap, 0, sizeof(struct rte_tm_node_capabilities));
+
+	if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
+		hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
+	else
+		hns3_tm_leaf_node_capabilities_get(dev, cap);
+
+	return 0;
+}
+
+static int
+hns3_tm_config_port_rate(struct hns3_hw *hw,
+			 struct hns3_tm_shaper_profile *shaper_profile)
+{
+	uint32_t firmware_rate;
+	uint64_t rate;
+
+	if (shaper_profile) {
+		rate = shaper_profile->profile.peak.rate;
+		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
+	} else {
+		firmware_rate = hw->dcb_info.pg_info[0].bw_limit;
+	}
+
+	/*
+	 * The TM shaper topology after device initialization:
+	 *     pri0 shaper   --->|
+	 *     pri1 shaper   --->|
+	 *     ...               |----> pg0 shaper ----> port shaper
+	 *     ...               |
+	 *     priX shaper   --->|
+	 *
+	 * Because the port shaper rate may be changed by firmware, the driver
+	 * uses the pg0 shaper to implement the port rate limit and thus
+	 * avoids configuring the port shaper concurrently with firmware.
+	 *
+	 * The final port rate = MIN(pg0 shaper rate, port shaper rate).
+	 */
+	return hns3_pg_shaper_rate_cfg(hw, 0, firmware_rate);
+}
+
+static int
+hns3_tm_config_tc_rate(struct hns3_hw *hw,
+		       uint8_t tc_no,
+		       struct hns3_tm_shaper_profile *shaper_profile)
+{
+	uint32_t firmware_rate;
+	uint64_t rate;
+
+	if (shaper_profile) {
+		rate = shaper_profile->profile.peak.rate;
+		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
+	} else {
+		firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
+	}
+
+	return hns3_pri_shaper_rate_cfg(hw, tc_no, firmware_rate);
+}
+
+static bool
+hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	struct hns3_tm_conf *tm_conf = &pf->tm_conf;
+	struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
+	struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
+	struct hns3_tm_node *tm_node;
+
+	/* TC */
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (!tm_node->reference_count) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+			error->message = "TC without queue assigned";
+			return false;
+		}
+
+		if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
+			hw->num_tc) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "node's TC does not exist";
+			return false;
+		}
+	}
+
+	/* Queue */
+	TAILQ_FOREACH(tm_node, queue_list, node) {
+		if (tm_node->id >= hw->data->nb_tx_queues) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "node's queue invalid";
+			return false;
+		}
+
+		if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
+		    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "queue's TC does not match parent's TC";
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static int
+hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
+			    struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+	struct hns3_tm_node *tm_node;
+	uint8_t tc_no;
+	int ret;
+
+	/* port */
+	tm_node = pf->tm_conf.root;
+	if (tm_node->shaper_profile) {
+		ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
+		if (ret) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+			error->message = "fail to set port peak rate";
+			return -EIO;
+		}
+	}
+
+	/* TC */
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (tm_node->shaper_profile == NULL)
+			continue;
+
+		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
+		ret = hns3_tm_config_tc_rate(hw, tc_no,
+					     tm_node->shaper_profile);
+		if (ret) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+			error->message = "fail to set TC peak rate";
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static int
+hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
+			 int clear_on_fail,
+			 struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret;
+
+	if (error == NULL)
+		return -EINVAL;
+
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "device is resetting";
+		/* Don't goto fail_clear; the user may retry later. */
+		return -EBUSY;
+	}
+
+	if (pf->tm_conf.root == NULL)
+		goto done;
+
+	/* Check the configuration before commit so key constraints hold. */
+	if (!hns3_tm_configure_check(hw, error))
+		goto fail_clear;
+
+	ret = hns3_tm_hierarchy_do_commit(hw, error);
+	if (ret)
+		goto fail_clear;
+
+done:
+	pf->tm_conf.committed = true;
+	return 0;
+
+fail_clear:
+	if (clear_on_fail) {
+		hns3_tm_conf_uninit(dev);
+		hns3_tm_conf_init(dev);
+	}
+	return -EINVAL;
+}
+
+static int
+hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
+			      int clear_on_fail,
+			      struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
+			      uint32_t node_id,
+			      enum hns3_tm_node_type node_type,
+			      struct hns3_tm_shaper_profile *shaper_profile,
+			      struct rte_tm_error *error)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	uint8_t tc_no;
+	int ret;
+
+	if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
+		if (shaper_profile != NULL) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+			error->message = "queue node shaper not supported";
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	if (!pf->tm_conf.committed)
+		return 0;
+
+	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
+		ret = hns3_tm_config_port_rate(hw, shaper_profile);
+		if (ret) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+			error->message = "fail to update port peak rate";
+		}
+
+		return ret;
+	}
+
+	/* Update the TC's shaper. */
+	tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
+	ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
+	if (ret) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "fail to update TC peak rate";
+	}
+
+	return ret;
+}
+
+static int
+hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
+			   uint32_t node_id,
+			   uint32_t shaper_profile_id,
+			   struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
+	struct hns3_tm_shaper_profile *profile = NULL;
+	struct hns3_tm_node *tm_node;
+
+	if (error == NULL)
+		return -EINVAL;
+
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "device is resetting";
+		return -EBUSY;
+	}
+
+	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
+	if (tm_node == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	if (shaper_profile_id == tm_node->params.shaper_profile_id)
+		return 0;
+
+	if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
+		if (profile == NULL) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+			error->message = "profile ID not exist";
+			return -EINVAL;
+		}
+	}
+
+	if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
+					  profile, error))
+		return -EINVAL;
+
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count--;
+	tm_node->shaper_profile = profile;
+	tm_node->params.shaper_profile_id = shaper_profile_id;
+	if (profile != NULL)
+		profile->reference_count++;
+
+	return 0;
+}
+
+static int
+hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
+				uint32_t node_id,
+				uint32_t shaper_profile_id,
+				struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_node_shaper_update(dev, node_id,
+					 shaper_profile_id, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static const struct rte_tm_ops hns3_tm_ops = {
+	.capabilities_get       = hns3_tm_capabilities_get,
+	.shaper_profile_add     = hns3_tm_shaper_profile_add,
+	.shaper_profile_delete  = hns3_tm_shaper_profile_del,
+	.node_add               = hns3_tm_node_add,
+	.node_delete            = hns3_tm_node_delete,
+	.node_type_get          = hns3_tm_node_type_get,
+	.level_capabilities_get = hns3_tm_level_capabilities_get,
+	.node_capabilities_get  = hns3_tm_node_capabilities_get,
+	.hierarchy_commit       = hns3_tm_hierarchy_commit_wrap,
+	.node_shaper_update     = hns3_tm_node_shaper_update_wrap,
+};
+
+int
+hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+		void *arg)
+{
+	if (arg == NULL)
+		return -EINVAL;
+
+	*(const void **)arg = &hns3_tm_ops;
+
+	return 0;
+}
+
+void
+hns3_tm_dev_start_proc(struct hns3_hw *hw)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+
+	if (pf->tm_conf.root && !pf->tm_conf.committed)
+		hns3_warn(hw,
+		    "please call hierarchy_commit() before starting the port.");
+}
+
+/*
+ * We need to clear the tm_conf committed flag when the device stops so that
+ * the user can modify the TM configuration (e.g. add or delete a node).
+ *
+ * If the user doesn't call hierarchy commit when the device starts later,
+ * the port/TC shaper rates would stay the same as previously committed.
+ *
+ * To avoid this problem, we need to restore the port/TC shaper rates when
+ * the device stops.
+ */
+void
+hns3_tm_dev_stop_proc(struct hns3_hw *hw)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+	struct hns3_tm_node *tm_node;
+	uint8_t tc_no;
+
+	if (!pf->tm_conf.committed)
+		return;
+
+	tm_node = pf->tm_conf.root;
+	if (tm_node != NULL && tm_node->shaper_profile)
+		(void)hns3_tm_config_port_rate(hw, NULL);
+
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (tm_node->shaper_profile == NULL)
+			continue;
+		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
+		(void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
+	}
+
+	pf->tm_conf.committed = false;
+}
+
+int
+hns3_tm_conf_update(struct hns3_hw *hw)
+{
+	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
+	struct rte_tm_error error;
+
+	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
+		return 0;
+
+	memset(&error, 0, sizeof(struct rte_tm_error));
+	return hns3_tm_hierarchy_do_commit(hw, &error);
+}
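
As a usage sketch (application side, not part of this patch): once the
hierarchy has been built with rte_tm_node_add(), it is committed before
the port starts. A minimal example, assuming port id 0:

	struct rte_tm_error error;
	int ret;

	/* clear_on_fail = 1: the driver drops its TM config on failure */
	ret = rte_tm_hierarchy_commit(0, 1, &error);
	if (ret != 0)
		printf("TM commit failed: %s\n",
		       error.message != NULL ? error.message : "unknown");
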
diff --git a/drivers/net/hns3/hns3_tm.h b/drivers/net/hns3/hns3_tm.h
new file mode 100644
index 0000000..d8de3e4
--- /dev/null
+++ b/drivers/net/hns3/hns3_tm.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020-2020 Hisilicon Limited.
+ */
+
+#ifndef _HNS3_TM_H_
+#define _HNS3_TM_H_
+
+#include <stdint.h>
+#include <rte_tailq.h>
+#include <rte_tm_driver.h>
+
+enum hns3_tm_node_type {
+	HNS3_TM_NODE_TYPE_PORT,
+	HNS3_TM_NODE_TYPE_TC,
+	HNS3_TM_NODE_TYPE_QUEUE,
+	HNS3_TM_NODE_TYPE_MAX,
+};
+
+enum hns3_tm_node_level {
+	HNS3_TM_NODE_LEVEL_PORT,
+	HNS3_TM_NODE_LEVEL_TC,
+	HNS3_TM_NODE_LEVEL_QUEUE,
+	HNS3_TM_NODE_LEVEL_MAX,
+};
+
+struct hns3_tm_shaper_profile {
+	TAILQ_ENTRY(hns3_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(hns3_shaper_profile_list, hns3_tm_shaper_profile);
+
+struct hns3_tm_node {
+	TAILQ_ENTRY(hns3_tm_node) node;
+	uint32_t id;
+	uint32_t reference_count;
+	struct hns3_tm_node *parent;
+	struct hns3_tm_shaper_profile *shaper_profile;
+	struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(hns3_tm_node_list, hns3_tm_node);
+
+struct hns3_tm_conf {
+	uint32_t nb_leaf_nodes_max; /* max number of leaf nodes */
+	uint32_t nb_nodes_max; /* max number of nodes */
+	uint32_t nb_shaper_profile_max; /* max number of shaper profiles */
+
+	struct hns3_shaper_profile_list shaper_profile_list;
+	uint32_t nb_shaper_profile; /* number of shaper profiles */
+
+	struct hns3_tm_node *root;
+	struct hns3_tm_node_list tc_list;
+	struct hns3_tm_node_list queue_list;
+	uint32_t nb_tc_node; /* number of added TC nodes */
+	uint32_t nb_queue_node; /* number of added queue nodes */
+
+	/*
+	 * This flag indicates whether the application may change the TM node
+	 * configuration.
+	 * When it's true, the configuration has been applied to hardware and
+	 * the application must not add or delete TM node configuration.
+	 * Before starting the port, the application should call the
+	 * hierarchy_commit API, which sets this flag to true. When the port
+	 * is stopped, the flag is set back to false.
+	 */
+	bool committed;
+};
+
+/*
+ * This helper calculates a node's TC number. The caller must make sure the
+ * node id is within the TC node id range.
+ *
+ * The application can call the rte_eth_dev_info_get API to get the port's
+ * max_tx_queues. TM node ids are assigned according to the following rules:
+ *     [0, max_tx_queues-1]: node ids of the corresponding Tx queues
+ *     max_tx_queues + 0   : node id of TC0
+ *     max_tx_queues + 1   : node id of TC1
+ *     ...
+ *     max_tx_queues + 7   : node id of TC7
+ *     max_tx_queues + 8   : node id of the port
+ *
+ */
+static inline uint8_t
+hns3_tm_calc_node_tc_no(struct hns3_tm_conf *conf, uint32_t node_id)
+{
+	if (node_id >= conf->nb_leaf_nodes_max &&
+	    node_id < conf->nb_nodes_max - 1)
+		return node_id - conf->nb_leaf_nodes_max;
+	else
+		return 0;
+}
+
+void hns3_tm_conf_init(struct rte_eth_dev *dev);
+void hns3_tm_conf_uninit(struct rte_eth_dev *dev);
+int hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
+void hns3_tm_dev_start_proc(struct hns3_hw *hw);
+void hns3_tm_dev_stop_proc(struct hns3_hw *hw);
+int hns3_tm_conf_update(struct hns3_hw *hw);
+
+#endif /* _HNS3_TM_H_ */
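
To illustrate the node id convention documented above, an application
would derive its TM node ids as follows (a sketch; port_id and the use
of Tx queue 5 are assumptions):

	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* queue nodes reuse the Tx queue number as node id */
	uint32_t queue5_node_id = 5;
	/* TC and port node ids start right after the queue id range */
	uint32_t tc1_node_id  = dev_info.max_tx_queues + 1;
	uint32_t port_node_id = dev_info.max_tx_queues + 8;
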
diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build
index 45cee34..40f59b2 100644
--- a/drivers/net/hns3/meson.build
+++ b/drivers/net/hns3/meson.build
@@ -25,7 +25,8 @@ sources = files('hns3_cmd.c',
 	'hns3_rss.c',
 	'hns3_rxtx.c',
 	'hns3_stats.c',
-	'hns3_mp.c')
+	'hns3_mp.c',
+	'hns3_tm.c')
 
 deps += ['hash']
 
-- 
2.7.4


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 3/8] net/hns3: fix VF query link status in dev init
  2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 1/8] net/hns3: use C11 atomics builtins for resetting Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 2/8] net/hns3: support RTE TM get ops function Lijun Ou
@ 2021-01-14 13:33 ` Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 4/8] net/hns3: use new opcode for clearing hardware resource Lijun Ou
                   ` (5 subsequent siblings)
  8 siblings, 0 replies; 12+ messages in thread
From: Lijun Ou @ 2021-01-14 13:33 UTC (permalink / raw)
  To: ferruh.yigit; +Cc: dev

From: Chengwen Feng <fengchengwen@huawei.com>

Currently, hns3vf queries the link status in the dev init stage, but the
link status should be maintained in the dev start stage; this patch fixes
that.

Also, in the dev start stage, use an immediate query instead of a delayed
one to make sure the link status is updated soon after start.

Fixes: a5475d61fa34 ("net/hns3: support VF")
Fixes: 958edf6627d5 ("net/hns3: fix VF link status")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
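For reference, calling the service handler directly at start performs an
immediate first query, and the handler re-arms itself afterwards. A
sketch of the assumed handler shape (the real function additionally
skips the query while a reset is pending):

	static void
	hns3vf_service_handler(void *param)
	{
		struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
		struct hns3_adapter *hns = eth_dev->data->dev_private;
		struct hns3_hw *hw = &hns->hw;

		/* query link status, then re-arm the periodic alarm */
		hns3vf_request_link_info(hw);
		rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL,
				  hns3vf_service_handler, eth_dev);
	}
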
 drivers/net/hns3/hns3_ethdev_vf.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 3809824..787978f 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1749,7 +1749,6 @@ hns3vf_init_hardware(struct hns3_adapter *hns)
 		goto err_init_hardware;
 	}
 
-	hns3vf_request_link_info(hw);
 	return 0;
 
 err_init_hardware:
@@ -2238,7 +2237,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	hns3_rx_scattered_calc(dev);
 	hns3_set_rxtx_function(dev);
 	hns3_mp_req_start_rxtx(dev);
-	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+	hns3vf_service_handler(dev);
 
 	hns3vf_restore_filter(dev);
 
-- 
2.7.4


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 4/8] net/hns3: use new opcode for clearing hardware resource
  2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
                   ` (2 preceding siblings ...)
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 3/8] net/hns3: fix VF query link status in dev init Lijun Ou
@ 2021-01-14 13:33 ` Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 5/8] net/hns3: fix register length when dump registers Lijun Ou
                   ` (4 subsequent siblings)
  8 siblings, 0 replies; 12+ messages in thread
From: Lijun Ou @ 2021-01-14 13:33 UTC (permalink / raw)
  To: ferruh.yigit; +Cc: dev

From: Hongbo Zheng <zhenghongbo3@huawei.com>

The original command opcode '0x700A' may cause a firmware error, so
'0x700A' is deprecated and '0x700B' is used to replace it.

Fixes: 223d9eceaeee ("net/hns3: clear residual hardware configurations on init")
Cc: stable@dpdk.org

Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
 drivers/net/hns3/hns3_cmd.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 194c3a7..e40293b 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -203,7 +203,7 @@ enum hns3_opcode_type {
 	HNS3_OPC_FD_COUNTER_OP          = 0x1205,
 
 	/* Clear hardware state command */
-	HNS3_OPC_CLEAR_HW_STATE         = 0x700A,
+	HNS3_OPC_CLEAR_HW_STATE         = 0x700B,
 
 	/* SFP command */
 	HNS3_OPC_SFP_GET_SPEED          = 0x7104,
-- 
2.7.4


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 5/8] net/hns3: fix register length when dump registers
  2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
                   ` (3 preceding siblings ...)
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 4/8] net/hns3: use new opcode for clearing hardware resource Lijun Ou
@ 2021-01-14 13:33 ` Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 6/8] net/hns3: fix data overwriting during register dump Lijun Ou
                   ` (3 subsequent siblings)
  8 siblings, 0 replies; 12+ messages in thread
From: Lijun Ou @ 2021-01-14 13:33 UTC (permalink / raw)
  To: ferruh.yigit; +Cc: dev

From: Chengchang Tang <tangchengchang@huawei.com>

Currently, the register length returned by hns3 is the total byte length
of all the registers. But the upper layer treats it as a register count,
so the buffer it sizes ends up being that length multiplied by the
register width. This wastes memory and prints invalid information.

This patch corrects the register length and width.

Fixes: 936eda25e8da ("net/hns3: support dump register")
Cc: stable@dpdk.org

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
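For reference, a sketch of the corrected arithmetic, assuming
REG_NUM_PER_LINE is the number of 32-bit words per dump line and
REG_LEN_PER_LINE equals REG_NUM_PER_LINE * sizeof(uint32_t) bytes:

	/* one extra line is reserved for the separator words */
	dfx_reg_lines = regs_num_32_bit * sizeof(uint32_t) /
				REG_LEN_PER_LINE + 1;
	/* the length reported upward counts 32-bit words, not bytes */
	len += dfx_reg_lines * REG_NUM_PER_LINE;
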
 drivers/net/hns3/hns3_regs.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c
index b2cc599..32597fe 100644
--- a/drivers/net/hns3/hns3_regs.c
+++ b/drivers/net/hns3/hns3_regs.c
@@ -104,6 +104,7 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length)
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	uint32_t cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
 	uint32_t regs_num_32_bit, regs_num_64_bit;
+	uint32_t dfx_reg_lines;
 	uint32_t len;
 	int ret;
 
@@ -117,7 +118,7 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length)
 	tqp_intr_lines = sizeof(tqp_intr_reg_addrs) / REG_LEN_PER_LINE + 1;
 
 	len = (cmdq_lines + common_lines + ring_lines * hw->tqps_num +
-	      tqp_intr_lines * hw->num_msi) * REG_LEN_PER_LINE;
+	      tqp_intr_lines * hw->num_msi) * REG_NUM_PER_LINE;
 
 	if (!hns->is_vf) {
 		ret = hns3_get_regs_num(hw, &regs_num_32_bit, &regs_num_64_bit);
@@ -126,8 +127,11 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length)
 				 ret);
 			return -ENOTSUP;
 		}
-		len += regs_num_32_bit * sizeof(uint32_t) +
-		       regs_num_64_bit * sizeof(uint64_t);
+		dfx_reg_lines = regs_num_32_bit * sizeof(uint32_t) /
+					REG_LEN_PER_LINE + 1;
+		dfx_reg_lines += regs_num_64_bit * sizeof(uint64_t) /
+					REG_LEN_PER_LINE + 1;
+		len += dfx_reg_lines * REG_NUM_PER_LINE;
 	}
 
 	*length = len;
-- 
2.7.4


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 6/8] net/hns3: fix data overwriting during register dump
  2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
                   ` (4 preceding siblings ...)
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 5/8] net/hns3: fix register length when dump registers Lijun Ou
@ 2021-01-14 13:33 ` Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 7/8] net/hns3: fix dump register out of range Lijun Ou
                   ` (2 subsequent siblings)
  8 siblings, 0 replies; 12+ messages in thread
From: Lijun Ou @ 2021-01-14 13:33 UTC (permalink / raw)
  To: ferruh.yigit; +Cc: dev

From: Chengchang Tang <tangchengchang@huawei.com>

The data pointer is not advanced after the BAR registers are dumped. This
causes the registers dumped later to overwrite the earlier data.

This patch fixes the overwriting by advancing the pointer after every dump
function. The missing separator between the 32-bit and the 64-bit
registers is also added to avoid a parsing error.

Fixes: 936eda25e8da ("net/hns3: support dump register")
Cc: stable@dpdk.org

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
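For reference, the resulting contract is that each dump helper returns
the number of 32-bit words it wrote, so the caller can advance the
buffer. A sketch of the caller side:

	uint32_t *data = regs->data;

	/* previously the return value was ignored and data never moved */
	data += hns3_direct_access_regs(hw, data);
	/* later dumps now append after the BAR registers */
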
 drivers/net/hns3/hns3_regs.c | 70 +++++++++++++++++++++++++-------------------
 1 file changed, 40 insertions(+), 30 deletions(-)

diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c
index 32597fe..775e096 100644
--- a/drivers/net/hns3/hns3_regs.c
+++ b/drivers/net/hns3/hns3_regs.c
@@ -252,63 +252,68 @@ hns3_get_64_bit_regs(struct hns3_hw *hw, uint32_t regs_num, void *data)
 	return 0;
 }
 
-static void
+static int
+hns3_insert_reg_separator(int reg_num, uint32_t *data)
+{
+	int separator_num;
+	int i;
+
+	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
+	for (i = 0; i < separator_num; i++)
+		*data++ = SEPARATOR_VALUE;
+	return separator_num;
+}
+
+static int
 hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data)
 {
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+	uint32_t *origin_data_ptr = data;
 	uint32_t reg_offset;
-	int separator_num;
-	int reg_um;
+	int reg_num;
 	int i, j;
 
 	/* fetching per-PF registers values from PF PCIe register space */
-	reg_um = sizeof(cmdq_reg_addrs) / sizeof(uint32_t);
-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
-	for (i = 0; i < reg_um; i++)
+	reg_num = sizeof(cmdq_reg_addrs) / sizeof(uint32_t);
+	for (i = 0; i < reg_num; i++)
 		*data++ = hns3_read_dev(hw, cmdq_reg_addrs[i]);
-	for (i = 0; i < separator_num; i++)
-		*data++ = SEPARATOR_VALUE;
+	data += hns3_insert_reg_separator(reg_num, data);
 
 	if (hns->is_vf)
-		reg_um = sizeof(common_vf_reg_addrs) / sizeof(uint32_t);
+		reg_num = sizeof(common_vf_reg_addrs) / sizeof(uint32_t);
 	else
-		reg_um = sizeof(common_reg_addrs) / sizeof(uint32_t);
-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
-	for (i = 0; i < reg_um; i++)
+		reg_num = sizeof(common_reg_addrs) / sizeof(uint32_t);
+	for (i = 0; i < reg_num; i++)
 		if (hns->is_vf)
 			*data++ = hns3_read_dev(hw, common_vf_reg_addrs[i]);
 		else
 			*data++ = hns3_read_dev(hw, common_reg_addrs[i]);
-	for (i = 0; i < separator_num; i++)
-		*data++ = SEPARATOR_VALUE;
+	data += hns3_insert_reg_separator(reg_num, data);
 
-	reg_um = sizeof(ring_reg_addrs) / sizeof(uint32_t);
-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+	reg_num = sizeof(ring_reg_addrs) / sizeof(uint32_t);
 	for (j = 0; j < hw->tqps_num; j++) {
 		reg_offset = hns3_get_tqp_reg_offset(j);
-		for (i = 0; i < reg_um; i++)
+		for (i = 0; i < reg_num; i++)
 			*data++ = hns3_read_dev(hw,
 						ring_reg_addrs[i] + reg_offset);
-		for (i = 0; i < separator_num; i++)
-			*data++ = SEPARATOR_VALUE;
+		data += hns3_insert_reg_separator(reg_num, data);
 	}
 
-	reg_um = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t);
-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+	reg_num = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t);
 	for (j = 0; j < hw->num_msi; j++) {
 		reg_offset = HNS3_TQP_INTR_REG_SIZE * j;
-		for (i = 0; i < reg_um; i++)
-			*data++ = hns3_read_dev(hw,
-						tqp_intr_reg_addrs[i] +
+		for (i = 0; i < reg_num; i++)
+			*data++ = hns3_read_dev(hw, tqp_intr_reg_addrs[i] +
 						reg_offset);
-		for (i = 0; i < separator_num; i++)
-			*data++ = SEPARATOR_VALUE;
+		data += hns3_insert_reg_separator(reg_num, data);
 	}
+	return data - origin_data_ptr;
 }
 
 int
 hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
 {
+#define HNS3_64_BIT_REG_SIZE (sizeof(uint64_t) / sizeof(uint32_t))
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 	uint32_t regs_num_32_bit;
@@ -338,7 +343,7 @@ hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
 		return -ENOTSUP;
 
 	/* fetching per-PF registers values from PF PCIe register space */
-	hns3_direct_access_regs(hw, data);
+	data += hns3_direct_access_regs(hw, data);
 
 	if (hns->is_vf)
 		return 0;
@@ -355,11 +360,16 @@ hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
 		hns3_err(hw, "Get 32 bit register failed, ret = %d", ret);
 		return ret;
 	}
-
 	data += regs_num_32_bit;
+	data += hns3_insert_reg_separator(regs_num_32_bit, data);
+
 	ret = hns3_get_64_bit_regs(hw, regs_num_64_bit, data);
-	if (ret)
+	if (ret) {
 		hns3_err(hw, "Get 64 bit register failed, ret = %d", ret);
-
+		return ret;
+	}
+	data += regs_num_64_bit * HNS3_64_BIT_REG_SIZE;
+	data += hns3_insert_reg_separator(regs_num_64_bit *
+					  HNS3_64_BIT_REG_SIZE, data);
 	return ret;
 }
-- 
2.7.4


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 7/8] net/hns3: fix dump register out of range
  2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
                   ` (5 preceding siblings ...)
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 6/8] net/hns3: fix data overwriting during register dump Lijun Ou
@ 2021-01-14 13:33 ` Lijun Ou
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 8/8] net/hns3: remove unused assignment for RSS key Lijun Ou
  2021-01-19  0:49 ` [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Ferruh Yigit
  8 siblings, 0 replies; 12+ messages in thread
From: Lijun Ou @ 2021-01-14 13:33 UTC (permalink / raw)
  To: ferruh.yigit; +Cc: dev

From: Chengchang Tang <tangchengchang@huawei.com>

Currently, when dumping the queue interrupt registers, the number of
registers to dump is calculated from num_msi. But num_msi also includes
the misc interrupts, so on some hardware versions, such as Kunpeng 930,
this leads to an illegal access.

This patch replaces num_msi with intr_tqps_num, which indicates the
number of interrupts used by the TQPs.

Fixes: 936eda25e8da ("net/hns3: support dump register")
Cc: stable@dpdk.org

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
 drivers/net/hns3/hns3_regs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c
index 775e096..f2cb465 100644
--- a/drivers/net/hns3/hns3_regs.c
+++ b/drivers/net/hns3/hns3_regs.c
@@ -300,7 +300,7 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data)
 	}
 
 	reg_num = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t);
-	for (j = 0; j < hw->num_msi; j++) {
+	for (j = 0; j < hw->intr_tqps_num; j++) {
 		reg_offset = HNS3_TQP_INTR_REG_SIZE * j;
 		for (i = 0; i < reg_num; i++)
 			*data++ = hns3_read_dev(hw, tqp_intr_reg_addrs[i] +
-- 
2.7.4


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [dpdk-dev] [PATCH 8/8] net/hns3: remove unused assignment for RSS key
  2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
                   ` (6 preceding siblings ...)
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 7/8] net/hns3: fix dump register out of range Lijun Ou
@ 2021-01-14 13:33 ` Lijun Ou
  2021-01-19  0:49 ` [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Ferruh Yigit
  8 siblings, 0 replies; 12+ messages in thread
From: Lijun Ou @ 2021-01-14 13:33 UTC (permalink / raw)
  To: ferruh.yigit; +Cc: dev

The default RSS key does not need to be configured again when the
hns3_dev_configure function is called with a NULL RSS key, because the
default key is already configured when the PMD runs hns3_do_start while
starting the device.

Besides, the initialized key is not overwritten if the
rte_eth_dev_configure API is called directly with a NULL RSS key after
PMD init.

Therefore, the RSS key assignment in the hns3_dev_configure function is
unnecessary.

Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
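For reference, an application that wants to keep whatever key is already
programmed simply leaves the key unset (a sketch using the RSS flags of
this DPDK era; port_id, nb_rxq and nb_txq are assumptions):

	struct rte_eth_conf conf = {
		.rxmode.mq_mode = ETH_MQ_RX_RSS,
		.rx_adv_conf.rss_conf = {
			.rss_key = NULL,	/* keep the current key */
			.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
		},
	};

	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
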
 drivers/net/hns3/hns3_ethdev.c    | 6 ------
 drivers/net/hns3/hns3_ethdev_vf.c | 6 ------
 2 files changed, 12 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index b6308ee..292a7e0 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2316,7 +2316,6 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
 	struct hns3_hw *hw = &hns->hw;
-	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 	struct rte_eth_rss_conf rss_conf;
@@ -2363,11 +2362,6 @@ hns3_dev_configure(struct rte_eth_dev *dev)
 		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 		rss_conf = conf->rx_adv_conf.rss_conf;
 		hw->rss_dis_flag = false;
-		if (rss_conf.rss_key == NULL) {
-			rss_conf.rss_key = rss_cfg->key;
-			rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
-		}
-
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
 		if (ret)
 			goto cfg_err;
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 787978f..0e9064f 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -773,7 +773,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
@@ -816,11 +815,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 		hw->rss_dis_flag = false;
 		rss_conf = conf->rx_adv_conf.rss_conf;
-		if (rss_conf.rss_key == NULL) {
-			rss_conf.rss_key = rss_cfg->key;
-			rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
-		}
-
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
 		if (ret)
 			goto cfg_err;
-- 
2.7.4


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [dpdk-dev] [PATCH 2/8] net/hns3: support RTE TM get ops function
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 2/8] net/hns3: support RTE TM get ops function Lijun Ou
@ 2021-01-19  0:49   ` Ferruh Yigit
  2021-01-19  8:06     ` oulijun
  0 siblings, 1 reply; 12+ messages in thread
From: Ferruh Yigit @ 2021-01-19  0:49 UTC (permalink / raw)
  To: Lijun Ou; +Cc: dev

On 1/14/2021 1:33 PM, Lijun Ou wrote:
> From: Chengwen Feng <fengchengwen@huawei.com>
> 
> This patch supports the RTE TM ops function for PF, which can be
> used to:
> 1. configure the port's peak rate.
> 2. configure a TC's peak rate.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Signed-off-by: Lijun Ou <oulijun@huawei.com>
> ---
>   drivers/net/hns3/hns3_dcb.c    |  216 ++++---
>   drivers/net/hns3/hns3_dcb.h    |    3 +
>   drivers/net/hns3/hns3_ethdev.c |   20 +-
>   drivers/net/hns3/hns3_ethdev.h |   14 +
>   drivers/net/hns3/hns3_tm.c     | 1291 ++++++++++++++++++++++++++++++++++++++++
>   drivers/net/hns3/hns3_tm.h     |  103 ++++
>   drivers/net/hns3/meson.build   |    3 +-
>   7 files changed, 1554 insertions(+), 96 deletions(-)
>   create mode 100644 drivers/net/hns3/hns3_tm.c
>   create mode 100644 drivers/net/hns3/hns3_tm.h

Can you please update the release notes to announce the PMD TM support? I can 
squash it in next-net.


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3
  2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
                   ` (7 preceding siblings ...)
  2021-01-14 13:33 ` [dpdk-dev] [PATCH 8/8] net/hns3: remove unused assignment for RSS key Lijun Ou
@ 2021-01-19  0:49 ` Ferruh Yigit
  8 siblings, 0 replies; 12+ messages in thread
From: Ferruh Yigit @ 2021-01-19  0:49 UTC (permalink / raw)
  To: Lijun Ou; +Cc: dev

On 1/14/2021 1:33 PM, Lijun Ou wrote:
> This series add TM feature support and fix some
> bugs for hns3 pmd driver. Because the TM need to
> use rte_atomicNN_xxx for the resetting of the
> hns3_reset_data structure. Therefore it needs to
> add a new updates patch for using C11 atomics
> builtins for resetting.
> 
> Chengchang Tang (3):
>    net/hns3: fix register length when dump registers
>    net/hns3: fix data overwriting during register dump
>    net/hns3: fix dump register out of range
> 
> Chengwen Feng (2):
>    net/hns3: support RTE TM get ops function
>    net/hns3: fix VF query link status in dev init
> 
> Hongbo Zheng (1):
>    net/hns3: use new opcode for clearing hardware resource
> 
> Lijun Ou (2):
>    net/hns3: use C11 atomics builtins for resetting
>    net/hns3: remove unused assignment for RSS key
> 

Series applied to dpdk-next-net/main, thanks.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [dpdk-dev] [PATCH 2/8] net/hns3: support RTE TM get ops function
  2021-01-19  0:49   ` Ferruh Yigit
@ 2021-01-19  8:06     ` oulijun
  0 siblings, 0 replies; 12+ messages in thread
From: oulijun @ 2021-01-19  8:06 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: dev



On 2021/1/19 8:49, Ferruh Yigit wrote:
> On 1/14/2021 1:33 PM, Lijun Ou wrote:
>> From: Chengwen Feng <fengchengwen@huawei.com>
>>
>> This patch supports the RTE TM ops function for PF, which can be
>> used to:
>> 1. configure the port's peak rate.
>> 2. configure a TC's peak rate.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Signed-off-by: Lijun Ou <oulijun@huawei.com>
>> ---
>>   drivers/net/hns3/hns3_dcb.c    |  216 ++++---
>>   drivers/net/hns3/hns3_dcb.h    |    3 +
>>   drivers/net/hns3/hns3_ethdev.c |   20 +-
>>   drivers/net/hns3/hns3_ethdev.h |   14 +
>>   drivers/net/hns3/hns3_tm.c     | 1291 
>> ++++++++++++++++++++++++++++++++++++++++
>>   drivers/net/hns3/hns3_tm.h     |  103 ++++
>>   drivers/net/hns3/meson.build   |    3 +-
>>   7 files changed, 1554 insertions(+), 96 deletions(-)
>>   create mode 100644 drivers/net/hns3/hns3_tm.c
>>   create mode 100644 drivers/net/hns3/hns3_tm.h
> 
> Can you please update the release notes to announce the PMD TM support? 
> I can squash it in next-net.
> 
OK, I will do it.
> .
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2021-01-19  8:06 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-01-14 13:33 [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 1/8] net/hns3: use C11 atomics builtins for resetting Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 2/8] net/hns3: support RTE TM get ops function Lijun Ou
2021-01-19  0:49   ` Ferruh Yigit
2021-01-19  8:06     ` oulijun
2021-01-14 13:33 ` [dpdk-dev] [PATCH 3/8] net/hns3: fix VF query link status in dev init Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 4/8] net/hns3: use new opcode for clearing hardware resource Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 5/8] net/hns3: fix register length when dump registers Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 6/8] net/hns3: fix data overwriting during register dump Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 7/8] net/hns3: fix dump register out of range Lijun Ou
2021-01-14 13:33 ` [dpdk-dev] [PATCH 8/8] net/hns3: remove unused assignment for RSS key Lijun Ou
2021-01-19  0:49 ` [dpdk-dev] [PATCH 0/8] TM and some bugfixes for hns3 Ferruh Yigit
