From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com, jasvinder.singh@intel.com,
 jingjing.wu@intel.com, Wenzhuo Lu <wenzhuo.lu@intel.com>
Date: Mon, 19 Jun 2017 13:43:54 +0800
Message-Id: <1497851036-96016-19-git-send-email-wenzhuo.lu@intel.com>
X-Mailer: git-send-email 1.9.3
In-Reply-To: <1497851036-96016-1-git-send-email-wenzhuo.lu@intel.com>
References: <1495873075-49542-1-git-send-email-wenzhuo.lu@intel.com>
 <1497851036-96016-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH v2 18/20] net/ixgbe: support getting TM level
	capability

Add support for the Traffic Management API
rte_tm_level_capabilities_get. The callback reports, for each of the
three levels of the ixgbe TM hierarchy (port, TC and queue), how many
nodes the level can hold and the shaper capabilities of those nodes.
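
For reference, a minimal usage sketch (not part of this patch: the
helper name dump_tm_levels and the printf output are only
illustrative, and the port_id type follows the current rte_tm.h
prototypes) of how an application can query the new callback through
the generic TM API:

#include <stdint.h>
#include <stdio.h>

#include <rte_tm.h>

/* Print the node counts reported for each TM level of a port. */
static void
dump_tm_levels(uint16_t port_id)
{
	struct rte_tm_capabilities tm_cap;
	struct rte_tm_level_capabilities lvl_cap;
	struct rte_tm_error tm_err;
	uint32_t lvl;

	if (rte_tm_capabilities_get(port_id, &tm_cap, &tm_err) != 0)
		return;

	for (lvl = 0; lvl < tm_cap.n_levels_max; lvl++) {
		if (rte_tm_level_capabilities_get(port_id, lvl, &lvl_cap,
				&tm_err) != 0)
			continue;
		printf("level %u: nodes max %u (non-leaf %u, leaf %u)\n",
		       lvl, lvl_cap.n_nodes_max,
		       lvl_cap.n_nodes_nonleaf_max,
		       lvl_cap.n_nodes_leaf_max);
	}
}

On a port driven by ixgbe with this series applied, the loop would
print one line for each of the three levels: port, TC and queue.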

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_tm.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 010ceac..6ff7026 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -54,6 +54,10 @@ static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 			     struct rte_tm_error *error);
 static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 			       int *is_leaf, struct rte_tm_error *error);
+static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+					uint32_t level_id,
+					struct rte_tm_level_capabilities *cap,
+					struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
 	.capabilities_get = ixgbe_tm_capabilities_get,
@@ -62,6 +66,7 @@ static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 	.node_add = ixgbe_node_add,
 	.node_delete = ixgbe_node_delete,
 	.node_type_get = ixgbe_node_type_get,
+	.level_capabilities_get = ixgbe_level_capabilities_get,
 };
 
 int
@@ -765,3 +770,76 @@ static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 
 	return 0;
 }
+
+static int
+ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+			     uint32_t level_id,
+			     struct rte_tm_level_capabilities *cap,
+			     struct rte_tm_error *error)
+{
+	uint8_t nb_tc = 0;
+	uint8_t nb_queue = 0;
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "too deep level";
+		return -EINVAL;
+	}
+
+	nb_tc = ixgbe_tc_nb_get(dev);
+	nb_queue = dev->data->nb_tx_queues;
+
+	/* root node */
+	if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
+		cap->n_nodes_max = 1;
+		cap->n_nodes_nonleaf_max = 1;
+		cap->n_nodes_leaf_max = 0;
+		cap->non_leaf_nodes_identical = false;
+		cap->leaf_nodes_identical = false;
+		cap->nonleaf.shaper_private_supported = true;
+		cap->nonleaf.shaper_private_dual_rate_supported = false;
+		cap->nonleaf.shaper_private_rate_min = 0;
+		/* 10Gbps -> 1.25GBps */
+		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+		cap->nonleaf.shaper_shared_n_max = 0;
+		cap->nonleaf.sched_n_children_max = nb_tc;
+		cap->nonleaf.sched_sp_n_priorities_max = 0;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 0;
+		cap->nonleaf.stats_mask = 0;
+
+		return 0;
+	}
+
+	/* TC or queue node */
+	if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+		/* TC nodes are non-leaf: their children are queues */
+		cap->n_nodes_max = nb_tc;
+		cap->n_nodes_nonleaf_max = nb_tc;
+		cap->n_nodes_leaf_max = 0;
+		cap->non_leaf_nodes_identical = true;
+	} else {
+		/* queue */
+		cap->n_nodes_max = nb_queue;
+		cap->n_nodes_nonleaf_max = 0;
+		cap->n_nodes_leaf_max = nb_queue;
+		cap->non_leaf_nodes_identical = false;
+	}
+	cap->leaf_nodes_identical = true;
+	cap->leaf.shaper_private_supported = true;
+	cap->leaf.shaper_private_dual_rate_supported = false;
+	cap->leaf.shaper_private_rate_min = 0;
+	/* 10Gbps -> 1.25GBps */
+	cap->leaf.shaper_private_rate_max = 1250000000ull;
+	cap->leaf.shaper_shared_n_max = 0;
+	cap->leaf.cman_head_drop_supported = false;
+	cap->leaf.cman_wred_context_private_supported = false;
+	cap->leaf.cman_wred_context_shared_n_max = 0;
+	cap->leaf.stats_mask = 0;
+
+	return 0;
+}
-- 
1.9.3