DPDK patches and discussions
From: Nithin Dabilpuram <nithind1988@gmail.com>
To: Beilei Xing <beilei.xing@intel.com>,
	Qi Zhang <qi.z.zhang@intel.com>, Rosen Xu <rosen.xu@intel.com>,
	Wenzhuo Lu <wenzhuo.lu@intel.com>,
	Konstantin Ananyev <konstantin.ananyev@intel.com>,
	Tomasz Duszynski <tdu@semihalf.com>,
	Liron Himi <lironh@marvell.com>,
	Jasvinder Singh <jasvinder.singh@intel.com>,
	Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Cc: dev@dpdk.org, jerinj@marvell.com, kkanas@marvell.com,
	Nithin Dabilpuram <ndabilpuram@marvell.com>
Subject: [dpdk-dev] [PATCH v2 2/4] drivers/net: update tm capability for existing pmds
Date: Sat, 11 Apr 2020 17:14:28 +0530
Message-ID: <20200411114430.18506-2-nithind1988@gmail.com>
In-Reply-To: <20200411114430.18506-1-nithind1988@gmail.com>

From: Nithin Dabilpuram <ndabilpuram@marvell.com>

Since the existing PMDs support shaper byte mode and scheduler
WFQ byte mode, advertise the same in the newly added port/level/node
capability fields.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
v1..v2:
- Newly included patch that updates existing PMDs with TM byte mode
  support to advertise the same in their port/level/node capabilities.
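
As a reference (not part of this patch), here is a minimal sketch of how an
application could query the new byte/packet mode capability flags added in
patch 1/4 of this series; the port id and error handling below are only
illustrative:

#include <string.h>
#include <stdio.h>
#include <rte_tm.h>

/* Print whether the port's private shapers and WFQ scheduler operate in
 * byte mode and/or packet mode, using the capability fields introduced
 * by patch 1/4 of this series.
 */
static void
show_tm_modes(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error;

	memset(&cap, 0, sizeof(cap));
	memset(&error, 0, sizeof(error));
	if (rte_tm_capabilities_get(port_id, &cap, &error) != 0) {
		printf("rte_tm_capabilities_get() failed: %s\n",
		       error.message ? error.message : "unknown");
		return;
	}

	printf("private shaper: byte mode %s, packet mode %s\n",
	       cap.shaper_private_byte_mode_supported ? "yes" : "no",
	       cap.shaper_private_packet_mode_supported ? "yes" : "no");
	printf("sched wfq: byte mode %s, packet mode %s\n",
	       cap.sched_wfq_byte_mode_supported ? "yes" : "no",
	       cap.sched_wfq_packet_mode_supported ? "yes" : "no");
}

The per-level and per-node variants of the same flags, as filled in by this
patch, can be checked the same way via rte_tm_level_capabilities_get() and
rte_tm_node_capabilities_get().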

 drivers/net/i40e/i40e_tm.c               | 16 ++++++++++++
 drivers/net/ipn3ke/ipn3ke_tm.c           | 26 ++++++++++++++++++
 drivers/net/ixgbe/ixgbe_tm.c             | 16 ++++++++++++
 drivers/net/mvpp2/mrvl_tm.c              | 14 ++++++++++
 drivers/net/softnic/rte_eth_softnic_tm.c | 45 ++++++++++++++++++++++++++++++++
 5 files changed, 117 insertions(+)

diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index c76760c..ab272e9 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -160,12 +160,16 @@ i40e_tm_capabilities_get(struct rte_eth_dev *dev,
 	cap->shaper_private_rate_min = 0;
 	/* 40Gbps -> 5GBps */
 	cap->shaper_private_rate_max = 5000000000ull;
+	cap->shaper_private_packet_mode_supported = 0;
+	cap->shaper_private_byte_mode_supported = 1;
 	cap->shaper_shared_n_max = 0;
 	cap->shaper_shared_n_nodes_per_shaper_max = 0;
 	cap->shaper_shared_n_shapers_per_node_max = 0;
 	cap->shaper_shared_dual_rate_n_max = 0;
 	cap->shaper_shared_rate_min = 0;
 	cap->shaper_shared_rate_max = 0;
+	cap->shaper_shared_packet_mode_supported = 0;
+	cap->shaper_shared_byte_mode_supported = 0;
 	cap->sched_n_children_max = hw->func_caps.num_tx_qp;
 	/**
 	 * HW supports SP. But no plan to support it now.
@@ -179,6 +183,8 @@ i40e_tm_capabilities_get(struct rte_eth_dev *dev,
 	 * So, all the nodes should have the same weight.
 	 */
 	cap->sched_wfq_weight_max = 1;
+	cap->sched_wfq_packet_mode_supported = 0;
+	cap->sched_wfq_byte_mode_supported = 0;
 	cap->cman_head_drop_supported = 0;
 	cap->dynamic_update_mask = 0;
 	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
@@ -754,6 +760,8 @@ i40e_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.shaper_private_rate_min = 0;
 		/* 40Gbps -> 5GBps */
 		cap->nonleaf.shaper_private_rate_max = 5000000000ull;
+		cap->nonleaf.shaper_private_packet_mode_supported = 0;
+		cap->nonleaf.shaper_private_byte_mode_supported = 1;
 		cap->nonleaf.shaper_shared_n_max = 0;
 		if (level_id == I40E_TM_NODE_TYPE_PORT)
 			cap->nonleaf.sched_n_children_max =
@@ -765,6 +773,8 @@ i40e_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
 		cap->nonleaf.sched_wfq_n_groups_max = 0;
 		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 		cap->nonleaf.stats_mask = 0;
 
 		return 0;
@@ -776,6 +786,8 @@ i40e_level_capabilities_get(struct rte_eth_dev *dev,
 	cap->leaf.shaper_private_rate_min = 0;
 	/* 40Gbps -> 5GBps */
 	cap->leaf.shaper_private_rate_max = 5000000000ull;
+	cap->leaf.shaper_private_packet_mode_supported = 0;
+	cap->leaf.shaper_private_byte_mode_supported = 1;
 	cap->leaf.shaper_shared_n_max = 0;
 	cap->leaf.cman_head_drop_supported = false;
 	cap->leaf.cman_wred_context_private_supported = true;
@@ -817,6 +829,8 @@ i40e_node_capabilities_get(struct rte_eth_dev *dev,
 	cap->shaper_private_rate_min = 0;
 	/* 40Gbps -> 5GBps */
 	cap->shaper_private_rate_max = 5000000000ull;
+	cap->shaper_private_packet_mode_supported = 0;
+	cap->shaper_private_byte_mode_supported = 1;
 	cap->shaper_shared_n_max = 0;
 
 	if (node_type == I40E_TM_NODE_TYPE_QUEUE) {
@@ -834,6 +848,8 @@ i40e_node_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
 		cap->nonleaf.sched_wfq_n_groups_max = 0;
 		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 	}
 
 	cap->stats_mask = 0;
diff --git a/drivers/net/ipn3ke/ipn3ke_tm.c b/drivers/net/ipn3ke/ipn3ke_tm.c
index 5a16c5f..35c90b8 100644
--- a/drivers/net/ipn3ke/ipn3ke_tm.c
+++ b/drivers/net/ipn3ke/ipn3ke_tm.c
@@ -440,6 +440,8 @@ ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
 	cap->shaper_private_dual_rate_n_max = 0;
 	cap->shaper_private_rate_min = 1;
 	cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM;
+	cap->shaper_private_packet_mode_supported = 0;
+	cap->shaper_private_byte_mode_supported = 1;
 
 	cap->shaper_shared_n_max = 0;
 	cap->shaper_shared_n_nodes_per_shaper_max = 0;
@@ -447,6 +449,8 @@ ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
 	cap->shaper_shared_dual_rate_n_max = 0;
 	cap->shaper_shared_rate_min = 0;
 	cap->shaper_shared_rate_max = 0;
+	cap->shaper_shared_packet_mode_supported = 0;
+	cap->shaper_shared_byte_mode_supported = 0;
 
 	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
 	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
@@ -456,6 +460,8 @@ ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev,
 	cap->sched_wfq_n_children_per_group_max = UINT32_MAX;
 	cap->sched_wfq_n_groups_max = 1;
 	cap->sched_wfq_weight_max = UINT32_MAX;
+	cap->sched_wfq_packet_mode_supported = 0;
+	cap->sched_wfq_byte_mode_supported = 1;
 
 	cap->cman_wred_packet_mode_supported = 0;
 	cap->cman_wred_byte_mode_supported = 0;
@@ -517,6 +523,8 @@ ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.shaper_private_dual_rate_supported = 0;
 		cap->nonleaf.shaper_private_rate_min = 1;
 		cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
+		cap->nonleaf.shaper_private_packet_mode_supported = 0;
+		cap->nonleaf.shaper_private_byte_mode_supported = 1;
 		cap->nonleaf.shaper_shared_n_max = 0;
 
 		cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
@@ -524,6 +532,8 @@ ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
 		cap->nonleaf.sched_wfq_n_groups_max = 0;
 		cap->nonleaf.sched_wfq_weight_max = 0;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 
 		cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
 		break;
@@ -539,6 +549,8 @@ ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.shaper_private_dual_rate_supported = 0;
 		cap->nonleaf.shaper_private_rate_min = 1;
 		cap->nonleaf.shaper_private_rate_max = UINT32_MAX;
+		cap->nonleaf.shaper_private_packet_mode_supported = 0;
+		cap->nonleaf.shaper_private_byte_mode_supported = 1;
 		cap->nonleaf.shaper_shared_n_max = 0;
 
 		cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
@@ -546,6 +558,8 @@ ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
 		cap->nonleaf.sched_wfq_n_groups_max = 0;
 		cap->nonleaf.sched_wfq_weight_max = 0;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 
 		cap->nonleaf.stats_mask = STATS_MASK_DEFAULT;
 		break;
@@ -561,6 +575,8 @@ ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->leaf.shaper_private_dual_rate_supported = 0;
 		cap->leaf.shaper_private_rate_min = 0;
 		cap->leaf.shaper_private_rate_max = 0;
+		cap->leaf.shaper_private_packet_mode_supported = 0;
+		cap->leaf.shaper_private_byte_mode_supported = 1;
 		cap->leaf.shaper_shared_n_max = 0;
 
 		cap->leaf.cman_head_drop_supported = 0;
@@ -632,6 +648,8 @@ ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
 		cap->shaper_private_dual_rate_supported = 0;
 		cap->shaper_private_rate_min = 1;
 		cap->shaper_private_rate_max = UINT32_MAX;
+		cap->shaper_private_packet_mode_supported = 0;
+		cap->shaper_private_byte_mode_supported = 1;
 		cap->shaper_shared_n_max = 0;
 
 		cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM;
@@ -640,6 +658,8 @@ ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
 			IPN3KE_TM_VT_NODE_NUM;
 		cap->nonleaf.sched_wfq_n_groups_max = 1;
 		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 
 		cap->stats_mask = STATS_MASK_DEFAULT;
 		break;
@@ -649,6 +669,8 @@ ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
 		cap->shaper_private_dual_rate_supported = 0;
 		cap->shaper_private_rate_min = 1;
 		cap->shaper_private_rate_max = UINT32_MAX;
+		cap->shaper_private_packet_mode_supported = 0;
+		cap->shaper_private_byte_mode_supported = 1;
 		cap->shaper_shared_n_max = 0;
 
 		cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM;
@@ -657,6 +679,8 @@ ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
 			IPN3KE_TM_COS_NODE_NUM;
 		cap->nonleaf.sched_wfq_n_groups_max = 1;
 		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 
 		cap->stats_mask = STATS_MASK_DEFAULT;
 		break;
@@ -666,6 +690,8 @@ ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev,
 		cap->shaper_private_dual_rate_supported = 0;
 		cap->shaper_private_rate_min = 0;
 		cap->shaper_private_rate_max = 0;
+		cap->shaper_private_packet_mode_supported = 0;
+		cap->shaper_private_byte_mode_supported = 0;
 		cap->shaper_shared_n_max = 0;
 
 		cap->leaf.cman_head_drop_supported = 0;
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 73845a7..c067109 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -168,12 +168,16 @@ ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
 	cap->shaper_private_rate_min = 0;
 	/* 10Gbps -> 1.25GBps */
 	cap->shaper_private_rate_max = 1250000000ull;
+	cap->shaper_private_packet_mode_supported = 0;
+	cap->shaper_private_byte_mode_supported = 1;
 	cap->shaper_shared_n_max = 0;
 	cap->shaper_shared_n_nodes_per_shaper_max = 0;
 	cap->shaper_shared_n_shapers_per_node_max = 0;
 	cap->shaper_shared_dual_rate_n_max = 0;
 	cap->shaper_shared_rate_min = 0;
 	cap->shaper_shared_rate_max = 0;
+	cap->shaper_shared_packet_mode_supported = 0;
+	cap->shaper_shared_byte_mode_supported = 0;
 	cap->sched_n_children_max = hw->mac.max_tx_queues;
 	/**
 	 * HW supports SP. But no plan to support it now.
@@ -182,6 +186,8 @@ ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
 	cap->sched_sp_n_priorities_max = 1;
 	cap->sched_wfq_n_children_per_group_max = 0;
 	cap->sched_wfq_n_groups_max = 0;
+	cap->sched_wfq_packet_mode_supported = 0;
+	cap->sched_wfq_byte_mode_supported = 0;
 	/**
 	 * SW only supports fair round robin now.
 	 * So, all the nodes should have the same weight.
@@ -875,6 +881,8 @@ ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.shaper_private_rate_min = 0;
 		/* 10Gbps -> 1.25GBps */
 		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+		cap->nonleaf.shaper_private_packet_mode_supported = 0;
+		cap->nonleaf.shaper_private_byte_mode_supported = 1;
 		cap->nonleaf.shaper_shared_n_max = 0;
 		if (level_id == IXGBE_TM_NODE_TYPE_PORT)
 			cap->nonleaf.sched_n_children_max =
@@ -886,6 +894,8 @@ ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
 		cap->nonleaf.sched_wfq_n_groups_max = 0;
 		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 		cap->nonleaf.stats_mask = 0;
 
 		return 0;
@@ -897,6 +907,8 @@ ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
 	cap->leaf.shaper_private_rate_min = 0;
 	/* 10Gbps -> 1.25GBps */
 	cap->leaf.shaper_private_rate_max = 1250000000ull;
+	cap->leaf.shaper_private_packet_mode_supported = 0;
+	cap->leaf.shaper_private_byte_mode_supported = 1;
 	cap->leaf.shaper_shared_n_max = 0;
 	cap->leaf.cman_head_drop_supported = false;
 	cap->leaf.cman_wred_context_private_supported = true;
@@ -938,6 +950,8 @@ ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 	cap->shaper_private_rate_min = 0;
 	/* 10Gbps -> 1.25GBps */
 	cap->shaper_private_rate_max = 1250000000ull;
+	cap->shaper_private_packet_mode_supported = 0;
+	cap->shaper_private_byte_mode_supported = 1;
 	cap->shaper_shared_n_max = 0;
 
 	if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
@@ -955,6 +969,8 @@ ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
 		cap->nonleaf.sched_wfq_n_groups_max = 0;
 		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
 	}
 
 	cap->stats_mask = 0;
diff --git a/drivers/net/mvpp2/mrvl_tm.c b/drivers/net/mvpp2/mrvl_tm.c
index 3de8997..e98f576 100644
--- a/drivers/net/mvpp2/mrvl_tm.c
+++ b/drivers/net/mvpp2/mrvl_tm.c
@@ -193,12 +193,16 @@ mrvl_capabilities_get(struct rte_eth_dev *dev,
 	cap->shaper_private_n_max = cap->shaper_n_max;
 	cap->shaper_private_rate_min = MRVL_RATE_MIN;
 	cap->shaper_private_rate_max = priv->rate_max;
+	cap->shaper_private_packet_mode_supported = 0;
+	cap->shaper_private_byte_mode_supported = 1;
 
 	cap->sched_n_children_max = dev->data->nb_tx_queues;
 	cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
 	cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
 	cap->sched_wfq_n_groups_max = 1;
 	cap->sched_wfq_weight_max = MRVL_WEIGHT_MAX;
+	cap->sched_wfq_packet_mode_supported = 0;
+	cap->sched_wfq_byte_mode_supported = 1;
 
 	cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_SUSPEND_RESUME |
 				   RTE_TM_UPDATE_NODE_STATS;
@@ -244,6 +248,8 @@ mrvl_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->nonleaf.shaper_private_supported = 1;
 		cap->nonleaf.shaper_private_rate_min = MRVL_RATE_MIN;
 		cap->nonleaf.shaper_private_rate_max = priv->rate_max;
+		cap->nonleaf.shaper_private_packet_mode_supported = 0;
+		cap->nonleaf.shaper_private_byte_mode_supported = 1;
 
 		cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
 		cap->nonleaf.sched_sp_n_priorities_max = 1;
@@ -251,6 +257,8 @@ mrvl_level_capabilities_get(struct rte_eth_dev *dev,
 			dev->data->nb_tx_queues;
 		cap->nonleaf.sched_wfq_n_groups_max = 1;
 		cap->nonleaf.sched_wfq_weight_max = MRVL_WEIGHT_MAX;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 1;
 		cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
 					  RTE_TM_STATS_N_BYTES;
 	} else { /* level_id == MRVL_NODE_QUEUE */
@@ -261,6 +269,8 @@ mrvl_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->leaf.shaper_private_supported = 1;
 		cap->leaf.shaper_private_rate_min = MRVL_RATE_MIN;
 		cap->leaf.shaper_private_rate_max = priv->rate_max;
+		cap->leaf.shaper_private_packet_mode_supported = 0;
+		cap->leaf.shaper_private_byte_mode_supported = 1;
 		cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
 	}
 
@@ -300,6 +310,8 @@ mrvl_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
 	cap->shaper_private_supported = 1;
 	cap->shaper_private_rate_min = MRVL_RATE_MIN;
 	cap->shaper_private_rate_max = priv->rate_max;
+	cap->shaper_private_packet_mode_supported = 0;
+	cap->shaper_private_byte_mode_supported = 1;
 
 	if (node->type == MRVL_NODE_PORT) {
 		cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
@@ -308,6 +320,8 @@ mrvl_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
 			dev->data->nb_tx_queues;
 		cap->nonleaf.sched_wfq_n_groups_max = 1;
 		cap->nonleaf.sched_wfq_weight_max = MRVL_WEIGHT_MAX;
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 1;
 		cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
 	} else {
 		cap->stats_mask = RTE_TM_STATS_N_PKTS;
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 80a470c..ac14fe1 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -447,6 +447,8 @@ static const struct rte_tm_capabilities tm_cap = {
 	.shaper_private_dual_rate_n_max = 0,
 	.shaper_private_rate_min = 1,
 	.shaper_private_rate_max = UINT32_MAX,
+	.shaper_private_packet_mode_supported = 0,
+	.shaper_private_byte_mode_supported = 1,
 
 	.shaper_shared_n_max = UINT32_MAX,
 	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
@@ -454,6 +456,8 @@ static const struct rte_tm_capabilities tm_cap = {
 	.shaper_shared_dual_rate_n_max = 0,
 	.shaper_shared_rate_min = 1,
 	.shaper_shared_rate_max = UINT32_MAX,
+	.shaper_shared_packet_mode_supported = 0,
+	.shaper_shared_byte_mode_supported = 1,
 
 	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
@@ -463,6 +467,8 @@ static const struct rte_tm_capabilities tm_cap = {
 	.sched_wfq_n_children_per_group_max = UINT32_MAX,
 	.sched_wfq_n_groups_max = 1,
 	.sched_wfq_weight_max = UINT32_MAX,
+	.sched_wfq_packet_mode_supported = 0,
+	.sched_wfq_byte_mode_supported = 1,
 
 	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
 	.cman_wred_byte_mode_supported = 0,
@@ -548,6 +554,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 1,
 			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 1,
 			.shaper_shared_n_max = 0,
 
 			.sched_n_children_max = UINT32_MAX,
@@ -555,6 +563,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.sched_wfq_n_children_per_group_max = UINT32_MAX,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 0,
 
 			.stats_mask = STATS_MASK_DEFAULT,
 		} },
@@ -572,6 +582,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 1,
 			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 1,
 			.shaper_shared_n_max = 0,
 
 			.sched_n_children_max = UINT32_MAX,
@@ -580,9 +592,14 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.sched_wfq_n_groups_max = 1,
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 			.sched_wfq_weight_max = UINT32_MAX,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 #else
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 0,
 #endif
+
 			.stats_mask = STATS_MASK_DEFAULT,
 		} },
 	},
@@ -599,6 +616,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 1,
 			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 1,
 			.shaper_shared_n_max = 0,
 
 			.sched_n_children_max =
@@ -608,6 +627,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.sched_wfq_n_children_per_group_max = 1,
 			.sched_wfq_n_groups_max = 0,
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 0,
 
 			.stats_mask = STATS_MASK_DEFAULT,
 		} },
@@ -625,6 +646,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 1,
 			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 1,
 			.shaper_shared_n_max = 1,
 
 			.sched_n_children_max =
@@ -634,6 +657,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 				RTE_SCHED_BE_QUEUES_PER_PIPE,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = UINT32_MAX,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 
 			.stats_mask = STATS_MASK_DEFAULT,
 		} },
@@ -651,6 +676,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 0,
 			.shaper_private_rate_max = 0,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 0,
 			.shaper_shared_n_max = 0,
 
 			.cman_head_drop_supported = 0,
@@ -736,6 +763,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 1,
 		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 1,
 		.shaper_shared_n_max = 0,
 
 		{.nonleaf = {
@@ -744,6 +773,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 			.sched_wfq_n_children_per_group_max = UINT32_MAX,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 0,
 		} },
 
 		.stats_mask = STATS_MASK_DEFAULT,
@@ -754,6 +785,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 1,
 		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 1,
 		.shaper_shared_n_max = 0,
 
 		{.nonleaf = {
@@ -762,6 +795,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 			.sched_wfq_n_children_per_group_max = UINT32_MAX,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = UINT32_MAX,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 0,
 		} },
 
 		.stats_mask = STATS_MASK_DEFAULT,
@@ -772,6 +807,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 1,
 		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 1,
 		.shaper_shared_n_max = 0,
 
 		{.nonleaf = {
@@ -782,6 +819,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 			.sched_wfq_n_children_per_group_max = 1,
 			.sched_wfq_n_groups_max = 0,
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 0,
 		} },
 
 		.stats_mask = STATS_MASK_DEFAULT,
@@ -792,6 +831,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 1,
 		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 1,
 		.shaper_shared_n_max = 1,
 
 		{.nonleaf = {
@@ -802,6 +843,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 				RTE_SCHED_BE_QUEUES_PER_PIPE,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = UINT32_MAX,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 		} },
 
 		.stats_mask = STATS_MASK_DEFAULT,
@@ -812,6 +855,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 0,
 		.shaper_private_rate_max = 0,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 0,
 		.shaper_shared_n_max = 0,
 
 
-- 
2.8.4


Thread overview: 60+ messages
2020-03-30 16:00 [dpdk-dev] [PATCH 1/2] ethdev: add tm cap for private shaper packet mode Nithin Dabilpuram
2020-03-30 16:00 ` [dpdk-dev] [PATCH 2/2] app/testpmd: add tm non leaf node pktmode command Nithin Dabilpuram
2020-04-07  7:30 ` [dpdk-dev] [PATCH 1/2] ethdev: add tm cap for private shaper packet mode Nithin Dabilpuram
2020-04-07 16:31 ` Dumitrescu, Cristian
2020-04-07 17:21   ` [dpdk-dev] [EXT] " Nithin Dabilpuram
2020-04-10 11:45     ` Dumitrescu, Cristian
2020-04-10 11:56       ` Nithin Dabilpuram
2020-04-11 11:44 ` [dpdk-dev] [PATCH v2 1/4] ethdev: add tm support for shaper config in pkt mode Nithin Dabilpuram
2020-04-11 11:44   ` Nithin Dabilpuram [this message]
2020-04-11 11:44   ` [dpdk-dev] [PATCH v2 3/4] app/testpmd: add tm cmd for non leaf and shaper pktmode Nithin Dabilpuram
2020-04-11 11:44   ` [dpdk-dev] [PATCH v2 4/4] net/octeontx2: support tm length adjust and pkt mode Nithin Dabilpuram
2020-04-16 13:48   ` [dpdk-dev] [PATCH v2 1/4] ethdev: add tm support for shaper config in " Ferruh Yigit
2020-04-21  5:11     ` [dpdk-dev] [EXT] " Nithin Dabilpuram
2020-04-21  9:30   ` [dpdk-dev] " Dumitrescu, Cristian
2020-04-21  9:58     ` Nithin Dabilpuram
2020-04-21 10:23       ` Dumitrescu, Cristian
2020-04-21 11:55         ` [dpdk-dev] [EXT] " Nithin Dabilpuram
2020-04-22  7:59 ` [dpdk-dev] [PATCH v3] " Nithin Dabilpuram
2020-04-22  7:59   ` [dpdk-dev] [PATCH v3 2/4] drivers/net: update tm capability for existing pmds Nithin Dabilpuram
2020-04-22  7:59   ` [dpdk-dev] [PATCH v3 3/4] app/testpmd: add tm cmd for non leaf and shaper pktmode Nithin Dabilpuram
2020-04-22  7:59   ` [dpdk-dev] [PATCH v3 4/4] net/octeontx2: support tm length adjust and pkt mode Nithin Dabilpuram
2020-04-22  8:09   ` [dpdk-dev] [PATCH v3] ethdev: add tm support for shaper config in " Nithin Dabilpuram
2020-04-22 12:18     ` Singh, Jasvinder
2020-04-22 17:21       ` [dpdk-dev] [EXT] " Nithin Dabilpuram
2020-04-22 10:10   ` [dpdk-dev] " Dumitrescu, Cristian
2020-04-22 11:31     ` [dpdk-dev] [EXT] " Nithin Dabilpuram
2020-04-22 11:49       ` Nithin Dabilpuram
2020-04-22 11:59         ` Dumitrescu, Cristian
2020-04-22 12:01       ` Dumitrescu, Cristian
2020-04-22  8:05 ` [dpdk-dev] [PATCH v3 1/4] " Nithin Dabilpuram
2020-04-22 17:21 ` [dpdk-dev] [PATCH v4 " Nithin Dabilpuram
2020-04-22 17:21   ` [dpdk-dev] [PATCH v4 2/4] drivers/net: update tm capability for existing pmds Nithin Dabilpuram
2020-04-22 17:21   ` [dpdk-dev] [PATCH v4 3/4] app/testpmd: add tm cmd for non leaf and shaper pktmode Nithin Dabilpuram
2020-04-22 17:21   ` [dpdk-dev] [PATCH v4 4/4] net/octeontx2: support tm length adjust and pkt mode Nithin Dabilpuram
2020-04-24 10:28   ` [dpdk-dev] [PATCH v4 1/4] ethdev: add tm support for shaper config in " Dumitrescu, Cristian
2020-04-25 20:09     ` Ferruh Yigit
2020-04-27  9:19       ` Dumitrescu, Cristian
2020-04-27 16:12         ` Ferruh Yigit
2020-04-27 16:28           ` Dumitrescu, Cristian
2020-04-28 15:30             ` Thomas Monjalon
2020-04-28 17:35               ` Dumitrescu, Cristian
2020-04-27 16:29           ` Jerin Jacob
2020-04-27 16:49             ` Ferruh Yigit
2020-04-27 16:59               ` Jerin Jacob
2020-04-28 11:51                 ` [dpdk-dev] [EXT] " Nithin Dabilpuram
2020-04-28 13:56                   ` Ferruh Yigit
2020-04-28 14:06                 ` [dpdk-dev] " Ferruh Yigit
2020-04-28 14:45                   ` Bruce Richardson
2020-04-28 15:04                     ` Luca Boccassi
2020-04-28 15:54                       ` Thomas Monjalon
2020-04-29  8:45                         ` Dumitrescu, Cristian
2020-04-29  9:03                           ` Bruce Richardson
2020-05-01 10:27                             ` Ferruh Yigit
2020-05-01 13:16                               ` [dpdk-dev] [EXT] " Nithin Dabilpuram
2020-08-25 16:59                                 ` Ferruh Yigit
2020-09-07 11:12                                   ` Nithin Dabilpuram
2020-09-14 13:01                                     ` Ferruh Yigit
2020-05-01 13:18                         ` [dpdk-dev] " Jerin Jacob
2020-05-05  8:01                           ` Ray Kinsella
2020-04-28 15:42                     ` Ray Kinsella
