DPDK patches and discussions
From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
	Stephen Hemminger <stephen@networkplumber.org>
Subject: [PATCH v2 2/5] net/ice: improve Tx scheduler graph output
Date: Tue, 15 Oct 2024 16:19:54 +0100	[thread overview]
Message-ID: <20241015151957.1413286-3-bruce.richardson@intel.com> (raw)
In-Reply-To: <20241015151957.1413286-1-bruce.richardson@intel.com>

The function to dump the Tx scheduler topology only adds to the chart
those nodes connected to Tx queues, or the nodes for the flow director
VSI. Change the function to work recursively from the root node and
thereby include all scheduler nodes, whether in use or not, in the
dump.
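
For reference, a simplified sketch of the recursive walk introduced by
this patch (error handling, per-node detail, and the VSI-cluster and
child-skipping logic are omitted; it assumes the driver-internal
struct ice_sched_node layout used in the diff below):

    /* Walk the scheduler tree depth-first from hw->port_info->root,
     * emitting one dot node per scheduler element and an edge from
     * each element to its parent.
     */
    static void
    dump_subtree(struct ice_sched_node *node, FILE *stream)
    {
        fprintf(stream, "\tNODE_%d [label=\"teid %d\"]\n",
                node->info.node_teid, node->info.node_teid);
        for (uint16_t i = 0; i < node->num_children; i++)
            dump_subtree(node->children[i], stream);
        if (node->info.parent_teid != 0xFFFFFFFF)
            fprintf(stream, "\tNODE_%d -> NODE_%d\n",
                    node->info.parent_teid, node->info.node_teid);
    }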

Also, improve the output of the Tx scheduler graphing function:

* Add VSI details to each node in the graph
* When the number of children is >16, skip the middle nodes to reduce
  the size of the graph; otherwise the dot output is unviewable for
  large hierarchies
* For VSIs other than zero, use dot's clustering method to put those
  VSIs into subgraphs with borders
* For leaf nodes, display the queue numbers for any nodes assigned to
  ethdev NIC Tx queues
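
As a usage note (not part of this patch), a minimal sketch of driving
the dump from an application; the header name is an assumption, and the
resulting file is rendered offline with Graphviz, e.g.
"dot -Tsvg tx_sched.dot -o tx_sched.svg":

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>
    #include <rte_pmd_ice.h> /* assumed location of the dump prototype */

    /* Dump the Tx scheduler topology of an already-started port to a
     * dot file; detail=true also prints rate-limit profile data.
     */
    static int
    dump_tx_sched(uint16_t port_id)
    {
        FILE *f = fopen("tx_sched.dot", "w");
        if (f == NULL)
            return -1;
        int ret = rte_pmd_ice_dump_txsched(port_id, true, f);
        fclose(f);
        return ret;
    }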

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/ice/ice_diagnose.c | 196 ++++++++++++---------------------
 1 file changed, 69 insertions(+), 127 deletions(-)

diff --git a/drivers/net/ice/ice_diagnose.c b/drivers/net/ice/ice_diagnose.c
index c357554707..623d84e37d 100644
--- a/drivers/net/ice/ice_diagnose.c
+++ b/drivers/net/ice/ice_diagnose.c
@@ -545,29 +545,15 @@ static void print_rl_profile(struct ice_aqc_rl_profile_elem *prof,
 	fprintf(stream, "\t\t\t\t\t</td>\n");
 }
 
-static
-void print_elem_type(FILE *stream, u8 type)
+static const char *
+get_elem_type(u8 type)
 {
-	switch (type) {
-	case 1:
-		fprintf(stream, "root");
-		break;
-	case 2:
-		fprintf(stream, "tc");
-		break;
-	case 3:
-		fprintf(stream, "se_generic");
-		break;
-	case 4:
-		fprintf(stream, "entry_point");
-		break;
-	case 5:
-		fprintf(stream, "leaf");
-		break;
-	default:
-		fprintf(stream, "%d", type);
-		break;
-	}
+	static const char * const ice_sched_node_types[] = {
+			"Undefined", "Root", "TC", "SE Generic", "SW Entry", "Leaf"
+	};
+	if (type < RTE_DIM(ice_sched_node_types))
+		return ice_sched_node_types[type];
+	return "*UNKNOWN*";
 }
 
 static
@@ -602,7 +588,9 @@ void print_priority_mode(FILE *stream, bool flag)
 }
 
 static
-void print_node(struct ice_aqc_txsched_elem_data *data,
+void print_node(struct ice_sched_node *node,
+		struct rte_eth_dev_data *ethdata,
+		struct ice_aqc_txsched_elem_data *data,
 		struct ice_aqc_rl_profile_elem *cir_prof,
 		struct ice_aqc_rl_profile_elem *eir_prof,
 		struct ice_aqc_rl_profile_elem *shared_prof,
@@ -613,17 +601,19 @@ void print_node(struct ice_aqc_txsched_elem_data *data,
 
 	fprintf(stream, "\t\t\t<table>\n");
 
-	fprintf(stream, "\t\t\t\t<tr>\n");
-	fprintf(stream, "\t\t\t\t\t<td> teid </td>\n");
-	fprintf(stream, "\t\t\t\t\t<td> %d </td>\n", data->node_teid);
-	fprintf(stream, "\t\t\t\t</tr>\n");
-
-	fprintf(stream, "\t\t\t\t<tr>\n");
-	fprintf(stream, "\t\t\t\t\t<td> type </td>\n");
-	fprintf(stream, "\t\t\t\t\t<td>");
-	print_elem_type(stream, data->data.elem_type);
-	fprintf(stream, "</td>\n");
-	fprintf(stream, "\t\t\t\t</tr>\n");
+	fprintf(stream, "\t\t\t\t<tr><td>teid</td><td>%d</td></tr>\n", data->node_teid);
+	fprintf(stream, "\t\t\t\t<tr><td>type</td><td>%s</td></tr>\n",
+			get_elem_type(data->data.elem_type));
+	fprintf(stream, "\t\t\t\t<tr><td>VSI</td><td>%u</td></tr>\n", node->vsi_handle);
+	if (data->data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
+		for (uint16_t i = 0; i < ethdata->nb_tx_queues; i++) {
+			struct ice_tx_queue *q = ethdata->tx_queues[i];
+			if (q->q_teid == data->node_teid) {
+				fprintf(stream, "\t\t\t\t<tr><td>TXQ</td><td>%u</td></tr>\n", i);
+				break;
+			}
+		}
+	}
 
 	if (!detail)
 		goto brief;
@@ -705,8 +695,6 @@ void print_node(struct ice_aqc_txsched_elem_data *data,
 	fprintf(stream, "\t\tshape=plain\n");
 	fprintf(stream, "\t]\n");
 
-	if (data->parent_teid != 0xFFFFFFFF)
-		fprintf(stream, "\tNODE_%d -> NODE_%d\n", data->parent_teid, data->node_teid);
 }
 
 static
@@ -731,112 +719,92 @@ int query_rl_profile(struct ice_hw *hw,
 	return 0;
 }
 
-static
-int query_node(struct ice_hw *hw, uint32_t child, uint32_t *parent,
-	       uint8_t level, bool detail, FILE *stream)
+static int
+query_node(struct ice_hw *hw, struct rte_eth_dev_data *ethdata,
+		struct ice_sched_node *node, bool detail, FILE *stream)
 {
-	struct ice_aqc_txsched_elem_data data;
+	struct ice_aqc_txsched_elem_data *data = &node->info;
 	struct ice_aqc_rl_profile_elem cir_prof;
 	struct ice_aqc_rl_profile_elem eir_prof;
 	struct ice_aqc_rl_profile_elem shared_prof;
 	struct ice_aqc_rl_profile_elem *cp = NULL;
 	struct ice_aqc_rl_profile_elem *ep = NULL;
 	struct ice_aqc_rl_profile_elem *sp = NULL;
-	int status, ret;
-
-	status = ice_sched_query_elem(hw, child, &data);
-	if (status != ICE_SUCCESS) {
-		if (level == hw->num_tx_sched_layers) {
-			/* ignore the error when a queue has been stopped. */
-			PMD_DRV_LOG(WARNING, "Failed to query queue node %d.", child);
-			*parent = 0xffffffff;
-			return 0;
-		}
-		PMD_DRV_LOG(ERR, "Failed to query scheduling node %d.", child);
-		return -EINVAL;
-	}
-
-	*parent = data.parent_teid;
+	u8 level = node->tx_sched_layer;
+	int ret;
 
-	if (data.data.cir_bw.bw_profile_idx != 0) {
-		ret = query_rl_profile(hw, level, 0, data.data.cir_bw.bw_profile_idx, &cir_prof);
+	if (data->data.cir_bw.bw_profile_idx != 0) {
+		ret = query_rl_profile(hw, level, 0, data->data.cir_bw.bw_profile_idx, &cir_prof);
 
 		if (ret)
 			return ret;
 		cp = &cir_prof;
 	}
 
-	if (data.data.eir_bw.bw_profile_idx != 0) {
-		ret = query_rl_profile(hw, level, 1, data.data.eir_bw.bw_profile_idx, &eir_prof);
+	if (data->data.eir_bw.bw_profile_idx != 0) {
+		ret = query_rl_profile(hw, level, 1, data->data.eir_bw.bw_profile_idx, &eir_prof);
 
 		if (ret)
 			return ret;
 		ep = &eir_prof;
 	}
 
-	if (data.data.srl_id != 0) {
-		ret = query_rl_profile(hw, level, 2, data.data.srl_id, &shared_prof);
+	if (data->data.srl_id != 0) {
+		ret = query_rl_profile(hw, level, 2, data->data.srl_id, &shared_prof);
 
 		if (ret)
 			return ret;
 		sp = &shared_prof;
 	}
 
-	print_node(&data, cp, ep, sp, detail, stream);
+	print_node(node, ethdata, data, cp, ep, sp, detail, stream);
 
 	return 0;
 }
 
-static
-int query_nodes(struct ice_hw *hw,
-		uint32_t *children, int child_num,
-		uint32_t *parents, int *parent_num,
-		uint8_t level, bool detail,
-		FILE *stream)
+static int
+query_node_recursive(struct ice_hw *hw, struct rte_eth_dev_data *ethdata,
+		struct ice_sched_node *node, bool detail, FILE *stream)
 {
-	uint32_t parent;
-	int i;
-	int j;
-
-	*parent_num = 0;
-	for (i = 0; i < child_num; i++) {
-		bool exist = false;
-		int ret;
+	bool close = false;
+	if (node->parent != NULL && node->vsi_handle != node->parent->vsi_handle) {
+		fprintf(stream, "subgraph cluster_%u {\n", node->vsi_handle);
+		fprintf(stream, "\tlabel = \"VSI %u\";\n", node->vsi_handle);
+		close = true;
+	}
 
-		ret = query_node(hw, children[i], &parent, level, detail, stream);
-		if (ret)
-			return -EINVAL;
+	int ret = query_node(hw, ethdata, node, detail, stream);
+	if (ret != 0)
+		return ret;
 
-		for (j = 0; j < *parent_num; j++) {
-			if (parents[j] == parent) {
-				exist = true;
-				break;
-			}
+	for (uint16_t i = 0; i < node->num_children; i++) {
+		ret = query_node_recursive(hw, ethdata, node->children[i], detail, stream);
+		if (ret != 0)
+			return ret;
+		/* if we have a lot of nodes, skip a bunch in the middle */
+		if (node->num_children > 16 && i == 2) {
+			uint16_t inc = node->num_children - 5;
+			fprintf(stream, "\tn%d_children [label=\"... +%d child nodes ...\"];\n",
+					node->info.node_teid, inc);
+			fprintf(stream, "\tNODE_%d -> n%d_children;\n",
+					node->info.node_teid, node->info.node_teid);
+			i += inc;
 		}
-
-		if (!exist && parent != 0xFFFFFFFF)
-			parents[(*parent_num)++] = parent;
 	}
+	if (close)
+		fprintf(stream, "}\n");
+	if (node->info.parent_teid != 0xFFFFFFFF)
+		fprintf(stream, "\tNODE_%d -> NODE_%d\n",
+				node->info.parent_teid, node->info.node_teid);
 
 	return 0;
 }
 
-int rte_pmd_ice_dump_txsched(uint16_t port, bool detail, FILE *stream)
+int
+rte_pmd_ice_dump_txsched(uint16_t port, bool detail, FILE *stream)
 {
 	struct rte_eth_dev *dev;
 	struct ice_hw *hw;
-	struct ice_pf *pf;
-	struct ice_q_ctx *q_ctx;
-	uint16_t q_num;
-	uint16_t i;
-	struct ice_tx_queue *txq;
-	uint32_t buf1[256];
-	uint32_t buf2[256];
-	uint32_t *children = buf1;
-	uint32_t *parents = buf2;
-	int child_num = 0;
-	int parent_num = 0;
-	uint8_t level;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
@@ -846,35 +814,9 @@ int rte_pmd_ice_dump_txsched(uint16_t port, bool detail, FILE *stream)
 
 	dev = &rte_eth_devices[port];
 	hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	level = hw->num_tx_sched_layers;
-
-	q_num = dev->data->nb_tx_queues;
-
-	/* main vsi */
-	for (i = 0; i < q_num; i++) {
-		txq = dev->data->tx_queues[i];
-		q_ctx = ice_get_lan_q_ctx(hw, txq->vsi->idx, 0, i);
-		children[child_num++] = q_ctx->q_teid;
-	}
-
-	/* fdir vsi */
-	q_ctx = ice_get_lan_q_ctx(hw, pf->fdir.fdir_vsi->idx, 0, 0);
-	children[child_num++] = q_ctx->q_teid;
 
 	fprintf(stream, "digraph tx_sched {\n");
-	while (child_num > 0) {
-		int ret;
-		ret = query_nodes(hw, children, child_num,
-				  parents, &parent_num,
-				  level, detail, stream);
-		if (ret)
-			return ret;
-
-		children = parents;
-		child_num = parent_num;
-		level--;
-	}
+	query_node_recursive(hw, dev->data, hw->port_info->root, detail, stream);
 	fprintf(stream, "}\n");
 
 	return 0;
-- 
2.43.0


Thread overview: 19+ messages
2024-10-09 17:08 [PATCH 0/5] updates for net/ice driver Bruce Richardson
2024-10-09 17:08 ` [PATCH 1/5] net/ice: detect stopping a flow-director queue twice Bruce Richardson
2024-10-09 17:44   ` Stephen Hemminger
2024-10-09 17:08 ` [PATCH 2/5] net/ice: improve Tx scheduler graph output Bruce Richardson
2024-10-09 17:45   ` Stephen Hemminger
2024-10-15 14:32     ` Bruce Richardson
2024-10-09 17:08 ` [PATCH 3/5] net/ice: add option to choose DDP package file Bruce Richardson
2024-10-09 17:47   ` Stephen Hemminger
2024-10-09 17:08 ` [PATCH 4/5] net/ice: add option to download scheduler topology Bruce Richardson
2024-10-09 17:49   ` Stephen Hemminger
2024-10-10  8:10     ` Bruce Richardson
2024-10-09 17:08 ` [PATCH 5/5] net/ice: limit the number of queues to sched capabilities Bruce Richardson
2024-10-09 17:49   ` Stephen Hemminger
2024-10-15 15:19 ` [PATCH v2 0/5] updates for net/ice driver Bruce Richardson
2024-10-15 15:19   ` [PATCH v2 1/5] net/ice: detect stopping a flow-director queue twice Bruce Richardson
2024-10-15 15:19   ` Bruce Richardson [this message]
2024-10-15 15:19   ` [PATCH v2 3/5] net/ice: add option to choose DDP package file Bruce Richardson
2024-10-15 15:19   ` [PATCH v2 4/5] net/ice: add option to download scheduler topology Bruce Richardson
2024-10-15 15:19   ` [PATCH v2 5/5] net/ice: limit the number of queues to sched capabilities Bruce Richardson
