DPDK patches and discussions
 help / color / mirror / Atom feed
From: Jasvinder Singh <jasvinder.singh@intel.com>
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com
Subject: [dpdk-dev] [PATCH 2/2] test/sched: add test for pipe profile add api
Date: Fri,  9 Mar 2018 18:41:14 +0000	[thread overview]
Message-ID: <20180309184114.139136-2-jasvinder.singh@intel.com> (raw)
In-Reply-To: <20180309184114.139136-1-jasvinder.singh@intel.com>

Update the unit test to check the working of the new API.

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
---
 test/test/test_sched.c | 91 ++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 69 insertions(+), 22 deletions(-)

diff --git a/test/test/test_sched.c b/test/test/test_sched.c
index 32e500b..5d30187 100644
--- a/test/test/test_sched.c
+++ b/test/test/test_sched.c
@@ -44,6 +44,18 @@ static struct rte_sched_pipe_params pipe_profile[] = {
 	},
 };
 
+static struct rte_sched_pipe_params pipe_profile1 = {
+	/* Profile #1 */
+	.tb_rate = 300000,
+	.tb_size = 100000,
+
+	.tc_rate = {300000, 300000, 300000, 300000},
+	.tc_period = 40,
+
+	.wrr_weights = {1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1},
+};
+
+
 static struct rte_sched_port_params port_param = {
 	.socket = 0, /* computed */
 	.rate = 0, /* computed */
@@ -102,7 +114,23 @@ prepare_pkt(struct rte_mbuf *mbuf)
 	mbuf->data_len = 60;
 }
 
+static int pipe_profile_set(struct rte_sched_port *port,
+	struct rte_sched_pipe_params *params)
+{
+	uint32_t pipe;
+	int32_t profile_id, err;
 
+	err = rte_sched_pipe_profile_add(port, params, &profile_id);
+	if (err)
+		return err;
+
+	for (pipe = 0; pipe < port_param.n_pipes_per_subport; pipe++) {
+		err = rte_sched_pipe_config(port, SUBPORT, pipe, profile_id);
+		TEST_ASSERT_SUCCESS(err, "Error config sched pipe %u, err=%d\n",
+			pipe, err);
+	}
+	return 0;
+}
 /**
  * test main entrance for library sched
  */
@@ -114,7 +142,7 @@ test_sched(void)
 	uint32_t pipe;
 	struct rte_mbuf *in_mbufs[10];
 	struct rte_mbuf *out_mbufs[10];
-	int i;
+	int i, pipe_profile = 0;
 
 	int err;
 
@@ -135,35 +163,42 @@ test_sched(void)
 		TEST_ASSERT_SUCCESS(err, "Error config sched pipe %u, err=%d\n", pipe, err);
 	}
 
-	for (i = 0; i < 10; i++) {
-		in_mbufs[i] = rte_pktmbuf_alloc(mp);
-		TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
-		prepare_pkt(in_mbufs[i]);
-	}
+	while (pipe_profile < 2) {
 
+		for (i = 0; i < 10; i++) {
+			in_mbufs[i] = rte_pktmbuf_alloc(mp);
+			TEST_ASSERT_NOT_NULL(in_mbufs[i],
+				"Packet allocation failed\n");
+			prepare_pkt(in_mbufs[i]);
+		}
 
-	err = rte_sched_port_enqueue(port, in_mbufs, 10);
-	TEST_ASSERT_EQUAL(err, 10, "Wrong enqueue, err=%d\n", err);
+		err = rte_sched_port_enqueue(port, in_mbufs, 10);
+		TEST_ASSERT_EQUAL(err, 10, "Wrong enqueue, err=%d\n", err);
 
-	err = rte_sched_port_dequeue(port, out_mbufs, 10);
-	TEST_ASSERT_EQUAL(err, 10, "Wrong dequeue, err=%d\n", err);
+		err = rte_sched_port_dequeue(port, out_mbufs, 10);
+		TEST_ASSERT_EQUAL(err, 10, "Wrong dequeue, err=%d\n", err);
 
-	for (i = 0; i < 10; i++) {
-		enum rte_meter_color color;
-		uint32_t subport, traffic_class, queue;
+		for (i = 0; i < 10; i++) {
+			enum rte_meter_color color;
+			uint32_t subport, traffic_class, queue;
 
-		color = rte_sched_port_pkt_read_color(out_mbufs[i]);
-		TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
+			color = rte_sched_port_pkt_read_color(out_mbufs[i]);
+			TEST_ASSERT_EQUAL(color,
+				e_RTE_METER_YELLOW, "Wrong color\n");
 
-		rte_sched_port_pkt_read_tree_path(out_mbufs[i],
-				&subport, &pipe, &traffic_class, &queue);
+			rte_sched_port_pkt_read_tree_path(out_mbufs[i],
+					&subport,
+					&pipe,
+					&traffic_class,
+					&queue);
 
-		TEST_ASSERT_EQUAL(subport, SUBPORT, "Wrong subport\n");
-		TEST_ASSERT_EQUAL(pipe, PIPE, "Wrong pipe\n");
-		TEST_ASSERT_EQUAL(traffic_class, TC, "Wrong traffic_class\n");
-		TEST_ASSERT_EQUAL(queue, QUEUE, "Wrong queue\n");
+			TEST_ASSERT_EQUAL(subport, SUBPORT, "Wrong subport\n");
+			TEST_ASSERT_EQUAL(pipe, PIPE, "Wrong pipe\n");
+			TEST_ASSERT_EQUAL(traffic_class, TC,
+				"Wrong traffic_class\n");
+			TEST_ASSERT_EQUAL(queue, QUEUE, "Wrong queue\n");
 
-	}
+		}
 
 
 	struct rte_sched_subport_stats subport_stats;
@@ -179,6 +214,18 @@ test_sched(void)
 	TEST_ASSERT_EQUAL(queue_stats.n_pkts, 10, "Wrong queue stats\n");
 #endif
 
+		for (i = 0; i < 10; i++)
+			rte_pktmbuf_free(in_mbufs[i]);
+
+		pipe_profile += 1;
+
+		if (pipe_profile == 1) {
+			err = pipe_profile_set(port, &pipe_profile1);
+			TEST_ASSERT_EQUAL(err, 0,
+				"Profile not added, err=%d\n", err);
+		}
+	}
+
 	rte_sched_port_free(port);
 
 	return 0;
-- 
2.9.3

  reply	other threads:[~2018-03-09 18:41 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-03-09 18:41 [dpdk-dev] [PATCH 1/2] librte_sched: add post-init pipe profile api Jasvinder Singh
2018-03-09 18:41 ` Jasvinder Singh [this message]
2018-05-03 15:29 ` Dumitrescu, Cristian
2018-05-04  8:41   ` Singh, Jasvinder
2018-05-04 14:10 ` [dpdk-dev] [PATCH v2] " Jasvinder Singh
2018-05-04 14:29   ` Dumitrescu, Cristian

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180309184114.139136-2-jasvinder.singh@intel.com \
    --to=jasvinder.singh@intel.com \
    --cc=cristian.dumitrescu@intel.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).