From: Wenjun Wu <wenjun1.wu@intel.com>
To: dev@dpdk.org, qiming.yang@intel.com, qi.z.zhang@intel.com
Subject: [PATCH v8 4/9] net/ice: support queue bandwidth limit
Date: Thu, 28 Apr 2022 10:59:09 +0800
Message-ID: <20220428025914.3475281-5-wenjun1.wu@intel.com>
In-Reply-To: <20220428025914.3475281-1-wenjun1.wu@intel.com>

From: Ting Xu <ting.xu@intel.com>

Enable the basic TM API for the PF only. Add support for adding shaper
profiles and queue nodes. Only maximum bandwidth is supported in
profiles, and profiles can be assigned to target queues. Only TC0 is
valid.
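
For reference, a minimal sketch (not part of this patch) of how an
application could drive this hierarchy through the generic rte_tm API:
one shaper profile capping a queue at 100 Mbps, a port -> TC0 -> queue
tree, then a commit. The helper name and all node/profile IDs are
illustrative; it assumes a configured port with at least one Tx queue,
and picks non-leaf node IDs above the Tx queue ID range because the
driver treats node_id < nb_tx_queues as a leaf (queue) node:

#include <string.h>
#include <stdint.h>

#include <rte_tm.h>

static int
setup_queue_rate_limit(uint16_t port_id)
{
	struct rte_tm_shaper_params sp;
	struct rte_tm_node_params np;
	struct rte_tm_error err;
	int ret;

	/* shaper profile: peak rate is in bytes per second; the driver
	 * converts it to Kbps (12500000 B/s -> 100000 Kbps = 100 Mbps)
	 */
	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = 12500000;
	ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
	if (ret != 0)
		return ret;

	/* non-leaf nodes: priority 0, weight 1, one SP priority */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	np.nonleaf.n_sp_priorities = 1;

	/* root (port) node at level 0, no parent */
	ret = rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL,
			      0, 1, 0, &np, &err);
	if (ret != 0)
		return ret;

	/* TC0 node at level 1, child of the root */
	ret = rte_tm_node_add(port_id, 900, 1000, 0, 1, 1, &np, &err);
	if (ret != 0)
		return ret;

	/* leaf node for Tx queue 0 at level 2, shaped by profile 1 */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = 1;
	np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
	ret = rte_tm_node_add(port_id, 0, 900, 0, 1, 2, &np, &err);
	if (ret != 0)
		return ret;

	/* program the queue rate limits; clear config on failure */
	return rte_tm_hierarchy_commit(port_id, 1, &err);
}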

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Signed-off-by: Ting Xu <ting.xu@intel.com>
---
 doc/guides/rel_notes/release_22_07.rst |   4 +
 drivers/net/ice/ice_ethdev.c           |  19 +
 drivers/net/ice/ice_ethdev.h           |  48 ++
 drivers/net/ice/ice_tm.c               | 599 +++++++++++++++++++++++++
 drivers/net/ice/meson.build            |   1 +
 5 files changed, 671 insertions(+)
 create mode 100644 drivers/net/ice/ice_tm.c

diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index 42a5f2d990..2ce3c99fb8 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel ice driver.**
+
+  * Added Tx QoS rate limiting and priority configuration support for queues and queue groups.
+  * Added Tx QoS queue weight configuration support.
 
 Removed Items
 -------------
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 13adcf90ed..37897765c8 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -205,6 +205,18 @@ static const struct rte_pci_id pci_id_ice_map[] = {
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
+static int
+ice_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+		void *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	*(const void **)arg = &ice_tm_ops;
+
+	return 0;
+}
+
 static const struct eth_dev_ops ice_eth_dev_ops = {
 	.dev_configure                = ice_dev_configure,
 	.dev_start                    = ice_dev_start,
@@ -267,6 +279,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.timesync_read_time           = ice_timesync_read_time,
 	.timesync_write_time          = ice_timesync_write_time,
 	.timesync_disable             = ice_timesync_disable,
+	.tm_ops_get                   = ice_tm_ops_get,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -2312,6 +2325,9 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* Initialize RSS context for gtpu_eh */
 	ice_rss_ctx_init(pf);
 
+	/* Initialize TM configuration */
+	ice_tm_conf_init(dev);
+
 	if (!ad->is_safe_mode) {
 		ret = ice_flow_init(ad);
 		if (ret) {
@@ -2492,6 +2508,9 @@ ice_dev_close(struct rte_eth_dev *dev)
 	rte_free(pf->proto_xtr);
 	pf->proto_xtr = NULL;
 
+	/* Uninit TM configuration */
+	ice_tm_conf_uninit(dev);
+
 	if (ad->devargs.pps_out_ena) {
 		ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);
 		ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 3ed580d438..0841e1866c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -9,10 +9,12 @@
 #include <rte_time.h>
 
 #include <ethdev_driver.h>
+#include <rte_tm_driver.h>
 
 #include "base/ice_common.h"
 #include "base/ice_adminq_cmd.h"
 #include "base/ice_flow.h"
+#include "base/ice_sched.h"
 
 #define ICE_ADMINQ_LEN               32
 #define ICE_SBIOQ_LEN                32
@@ -453,6 +455,48 @@ struct ice_acl_info {
 	uint64_t hw_entry_id[MAX_ACL_NORMAL_ENTRIES];
 };
 
+TAILQ_HEAD(ice_shaper_profile_list, ice_tm_shaper_profile);
+TAILQ_HEAD(ice_tm_node_list, ice_tm_node);
+
+struct ice_tm_shaper_profile {
+	TAILQ_ENTRY(ice_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct ice_tm_node {
+	TAILQ_ENTRY(ice_tm_node) node;
+	uint32_t id;
+	uint32_t tc;
+	uint32_t priority;
+	uint32_t weight;
+	uint32_t reference_count;
+	struct ice_tm_node *parent;
+	struct ice_tm_shaper_profile *shaper_profile;
+	struct rte_tm_node_params params;
+};
+
+/* node type of Traffic Manager */
+enum ice_tm_node_type {
+	ICE_TM_NODE_TYPE_PORT,
+	ICE_TM_NODE_TYPE_TC,
+	ICE_TM_NODE_TYPE_QUEUE,
+	ICE_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store all the Traffic Manager configuration. */
+struct ice_tm_conf {
+	struct ice_shaper_profile_list shaper_profile_list;
+	struct ice_tm_node *root; /* root node - port */
+	struct ice_tm_node_list tc_list; /* node list for all the TCs */
+	struct ice_tm_node_list queue_list; /* node list for all the queues */
+	uint32_t nb_tc_node;
+	uint32_t nb_queue_node;
+	bool committed;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -497,6 +541,7 @@ struct ice_pf {
 	uint64_t old_tx_bytes;
 	uint64_t supported_rxdid; /* bitmap for supported RXDID */
 	uint64_t rss_hf;
+	struct ice_tm_conf tm_conf;
 };
 
 #define ICE_MAX_QUEUE_NUM  2048
@@ -620,6 +665,9 @@ int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
 			 struct ice_rss_hash_cfg *cfg);
 int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
 			 struct ice_rss_hash_cfg *cfg);
+void ice_tm_conf_init(struct rte_eth_dev *dev);
+void ice_tm_conf_uninit(struct rte_eth_dev *dev);
+extern const struct rte_tm_ops ice_tm_ops;
 
 static inline int
 ice_align_floor(int n)
diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c
new file mode 100644
index 0000000000..383af88981
--- /dev/null
+++ b/drivers/net/ice/ice_tm.c
@@ -0,0 +1,599 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+#include <rte_tm_driver.h>
+
+#include "ice_ethdev.h"
+#include "ice_rxtx.h"
+
+static int ice_hierarchy_commit(struct rte_eth_dev *dev,
+				 int clear_on_fail,
+				 struct rte_tm_error *error);
+static int ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+	      uint32_t parent_node_id, uint32_t priority,
+	      uint32_t weight, uint32_t level_id,
+	      struct rte_tm_node_params *params,
+	      struct rte_tm_error *error);
+static int ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+			    struct rte_tm_error *error);
+static int ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+		   int *is_leaf, struct rte_tm_error *error);
+static int ice_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error);
+static int ice_shaper_profile_del(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_error *error);
+
+const struct rte_tm_ops ice_tm_ops = {
+	.shaper_profile_add = ice_shaper_profile_add,
+	.shaper_profile_delete = ice_shaper_profile_del,
+	.node_add = ice_tm_node_add,
+	.node_delete = ice_tm_node_delete,
+	.node_type_get = ice_node_type_get,
+	.hierarchy_commit = ice_hierarchy_commit,
+};
+
+void
+ice_tm_conf_init(struct rte_eth_dev *dev)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	/* initialize node configuration */
+	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+	pf->tm_conf.root = NULL;
+	TAILQ_INIT(&pf->tm_conf.tc_list);
+	TAILQ_INIT(&pf->tm_conf.queue_list);
+	pf->tm_conf.nb_tc_node = 0;
+	pf->tm_conf.nb_queue_node = 0;
+	pf->tm_conf.committed = false;
+}
+
+void
+ice_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_tm_node *tm_node;
+
+	/* clear node configuration */
+	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
+		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+		rte_free(tm_node);
+	}
+	pf->tm_conf.nb_queue_node = 0;
+	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
+		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+		rte_free(tm_node);
+	}
+	pf->tm_conf.nb_tc_node = 0;
+	if (pf->tm_conf.root) {
+		rte_free(pf->tm_conf.root);
+		pf->tm_conf.root = NULL;
+	}
+}
+
+static inline struct ice_tm_node *
+ice_tm_node_search(struct rte_eth_dev *dev,
+		    uint32_t node_id, enum ice_tm_node_type *node_type)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+	struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+	struct ice_tm_node *tm_node;
+
+	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
+		*node_type = ICE_TM_NODE_TYPE_PORT;
+		return pf->tm_conf.root;
+	}
+
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = ICE_TM_NODE_TYPE_TC;
+			return tm_node;
+		}
+	}
+
+	TAILQ_FOREACH(tm_node, queue_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = ICE_TM_NODE_TYPE_QUEUE;
+			return tm_node;
+		}
+	}
+
+	return NULL;
+}
+
+static int
+ice_node_param_check(struct ice_pf *pf, uint32_t node_id,
+		      uint32_t priority, uint32_t weight,
+		      struct rte_tm_node_params *params,
+		      struct rte_tm_error *error)
+{
+	/* check all the unsupported parameters */
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	if (priority) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+		error->message = "priority should be 0";
+		return -EINVAL;
+	}
+
+	if (weight != 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+		error->message = "weight must be 1";
+		return -EINVAL;
+	}
+
+	/* shared shaper is not supported */
+	if (params->shared_shaper_id) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+	if (params->n_shared_shapers) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+
+	/* for non-leaf node */
+	if (node_id >= pf->dev_data->nb_tx_queues) {
+		if (params->nonleaf.wfq_weight_mode) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+			error->message = "WFQ not supported";
+			return -EINVAL;
+		}
+		if (params->nonleaf.n_sp_priorities != 1) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+			error->message = "SP priority not supported";
+			return -EINVAL;
+		} else if (params->nonleaf.wfq_weight_mode &&
+			   !(*params->nonleaf.wfq_weight_mode)) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+			error->message = "WFP should be byte mode";
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	/* for leaf node */
+	if (params->leaf.cman) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+		error->message = "Congestion management not supported";
+		return -EINVAL;
+	}
+	if (params->leaf.wred.wred_profile_id !=
+	    RTE_TM_WRED_PROFILE_ID_NONE) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+	if (params->leaf.wred.shared_wred_context_id) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+	if (params->leaf.wred.n_shared_wred_contexts) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+		   int *is_leaf, struct rte_tm_error *error)
+{
+	enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
+	struct ice_tm_node *tm_node;
+
+	if (!is_leaf || !error)
+		return -EINVAL;
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	/* check if the node id exists */
+	tm_node = ice_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	if (node_type == ICE_TM_NODE_TYPE_QUEUE)
+		*is_leaf = true;
+	else
+		*is_leaf = false;
+
+	return 0;
+}
+
+static inline struct ice_tm_shaper_profile *
+ice_shaper_profile_search(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_shaper_profile_list *shaper_profile_list =
+		&pf->tm_conf.shaper_profile_list;
+	struct ice_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
+static int
+ice_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+				struct rte_tm_error *error)
+{
+	/* min bucket size not supported */
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+	/* max bucket size not supported */
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+	/* length adjustment not supported */
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ice_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (!profile || !error)
+		return -EINVAL;
+
+	ret = ice_shaper_profile_param_check(profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);
+
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID exist";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("ice_tm_shaper_profile",
+				     sizeof(struct ice_tm_shaper_profile),
+				     0);
+	if (!shaper_profile)
+		return -ENOMEM;
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	rte_memcpy(&shaper_profile->profile, profile,
+			 sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+
+	return 0;
+}
+
+static int
+ice_shaper_profile_del(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_tm_shaper_profile *shaper_profile;
+
+	if (!error)
+		return -EINVAL;
+
+	shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID not exist";
+		return -EINVAL;
+	}
+
+	/* don't delete a profile if it's used by one or several nodes */
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+
+	return 0;
+}
+
+static int
+ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+	      uint32_t parent_node_id, uint32_t priority,
+	      uint32_t weight, uint32_t level_id,
+	      struct rte_tm_node_params *params,
+	      struct rte_tm_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
+	enum ice_tm_node_type parent_node_type = ICE_TM_NODE_TYPE_MAX;
+	struct ice_tm_shaper_profile *shaper_profile = NULL;
+	struct ice_tm_node *tm_node;
+	struct ice_tm_node *parent_node;
+	uint16_t tc_nb = 1;
+	int ret;
+
+	if (!params || !error)
+		return -EINVAL;
+
+	/* if already committed */
+	if (pf->tm_conf.committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	ret = ice_node_param_check(pf, node_id, priority, weight,
+				    params, error);
+	if (ret)
+		return ret;
+
+	/* check if the node already exists */
+	if (ice_tm_node_search(dev, node_id, &node_type)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id already used";
+		return -EINVAL;
+	}
+
+	/* check the shaper profile id */
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = ice_shaper_profile_search(dev,
+			params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile not exist";
+			return -EINVAL;
+		}
+	}
+
+	/* root node if it does not have a parent */
+	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+		/* check level */
+		if (level_id != ICE_TM_NODE_TYPE_PORT) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+			error->message = "Wrong level";
+			return -EINVAL;
+		}
+
+		/* there can be only one root node */
+		if (pf->tm_conf.root) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "already have a root";
+			return -EINVAL;
+		}
+
+		/* add the root node */
+		tm_node = rte_zmalloc("ice_tm_node",
+				      sizeof(struct ice_tm_node),
+				      0);
+		if (!tm_node)
+			return -ENOMEM;
+		tm_node->id = node_id;
+		tm_node->parent = NULL;
+		tm_node->reference_count = 0;
+		rte_memcpy(&tm_node->params, params,
+				 sizeof(struct rte_tm_node_params));
+		pf->tm_conf.root = tm_node;
+		return 0;
+	}
+
+	/* TC or queue node */
+	/* check the parent node */
+	parent_node = ice_tm_node_search(dev, parent_node_id,
+					  &parent_node_type);
+	if (!parent_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent not exist";
+		return -EINVAL;
+	}
+	if (parent_node_type != ICE_TM_NODE_TYPE_PORT &&
+	    parent_node_type != ICE_TM_NODE_TYPE_TC) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent is not root or TC";
+		return -EINVAL;
+	}
+	/* check level */
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+	    level_id != (uint32_t)parent_node_type + 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "Wrong level";
+		return -EINVAL;
+	}
+
+	/* check the node number */
+	if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
+		/* check the TC number */
+		if (pf->tm_conf.nb_tc_node >= tc_nb) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too many TCs";
+			return -EINVAL;
+		}
+	} else {
+		/* check the queue number */
+		if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too many queues";
+			return -EINVAL;
+		}
+		if (node_id >= pf->dev_data->nb_tx_queues) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too large queue id";
+			return -EINVAL;
+		}
+	}
+
+	/* add the TC or queue node */
+	tm_node = rte_zmalloc("ice_tm_node",
+			      sizeof(struct ice_tm_node),
+			      0);
+	if (!tm_node)
+		return -ENOMEM;
+	tm_node->id = node_id;
+	tm_node->priority = priority;
+	tm_node->weight = weight;
+	tm_node->reference_count = 0;
+	tm_node->parent = parent_node;
+	tm_node->shaper_profile = shaper_profile;
+	rte_memcpy(&tm_node->params, params,
+			 sizeof(struct rte_tm_node_params));
+	if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
+		TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
+				  tm_node, node);
+		tm_node->tc = pf->tm_conf.nb_tc_node;
+		pf->tm_conf.nb_tc_node++;
+	} else {
+		TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
+				  tm_node, node);
+		tm_node->tc = parent_node->tc;
+		pf->tm_conf.nb_queue_node++;
+	}
+	tm_node->parent->reference_count++;
+
+	return 0;
+}
+
+static int
+ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+		 struct rte_tm_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
+	struct ice_tm_node *tm_node;
+
+	if (!error)
+		return -EINVAL;
+
+	/* if already committed */
+	if (pf->tm_conf.committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	/* check if the node id exists */
+	tm_node = ice_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	/* the node should have no children */
+	if (tm_node->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message =
+			"cannot delete a node which has children";
+		return -EINVAL;
+	}
+
+	/* root node */
+	if (node_type == ICE_TM_NODE_TYPE_PORT) {
+		rte_free(tm_node);
+		pf->tm_conf.root = NULL;
+		return 0;
+	}
+
+	/* TC or queue node */
+	tm_node->parent->reference_count--;
+	if (node_type == ICE_TM_NODE_TYPE_TC) {
+		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+		pf->tm_conf.nb_tc_node--;
+	} else {
+		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+		pf->tm_conf.nb_queue_node--;
+	}
+	rte_free(tm_node);
+
+	return 0;
+}
+
+static int ice_hierarchy_commit(struct rte_eth_dev *dev,
+				 int clear_on_fail,
+				 struct rte_tm_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+	struct ice_tm_node *tm_node;
+	struct ice_tx_queue *txq;
+	struct ice_vsi *vsi;
+	int ret_val = ICE_SUCCESS;
+	uint64_t peak = 0;
+
+	TAILQ_FOREACH(tm_node, queue_list, node) {
+		txq = dev->data->tx_queues[tm_node->id];
+		vsi = txq->vsi;
+		if (tm_node->shaper_profile)
+			/* convert from bytes per second to Kbps */
+			peak = tm_node->shaper_profile->profile.peak.rate /
+			       1000 * BITS_PER_BYTE;
+		else
+			peak = 0;
+		ret_val = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx,
+				 tm_node->tc, tm_node->id, ICE_MAX_BW, (u32)peak);
+		if (ret_val) {
+			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+			PMD_DRV_LOG(ERR, "configure queue %u bandwidth failed", tm_node->id);
+			goto fail_clear;
+		}
+	}
+
+	return ret_val;
+
+fail_clear:
+	/* clear all the traffic manager configuration */
+	if (clear_on_fail) {
+		ice_tm_conf_uninit(dev);
+		ice_tm_conf_init(dev);
+	}
+	return ret_val;
+}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index d608da7765..de307c9e71 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -12,6 +12,7 @@ sources = files(
         'ice_hash.c',
         'ice_rxtx.c',
         'ice_switch_filter.c',
+        'ice_tm.c',
 )
 
 deps += ['hash', 'net', 'common_iavf']
-- 
2.25.1

