* [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update
@ 2020-03-25  8:10 Xiao Qimai
  2020-03-25  8:10 ` [dts] [PATCH V1 02/11]loopback_multi_queues: " Xiao Qimai
                   ` (12 more replies)
  0 siblings, 13 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 .../TestSuite_loopback_multi_paths_port_restart.py | 85 +++++++++++++++-------
 1 file changed, 57 insertions(+), 28 deletions(-)

diff --git a/tests/TestSuite_loopback_multi_paths_port_restart.py b/tests/TestSuite_loopback_multi_paths_port_restart.py
index 0b54b8d..5f1faad 100644
--- a/tests/TestSuite_loopback_multi_paths_port_restart.py
+++ b/tests/TestSuite_loopback_multi_paths_port_restart.py
@@ -121,15 +121,21 @@ class TestLoopbackPortRestart(TestCase):
                 break
             time.sleep(3)
             loop = loop + 1
-
         self.verify("down" not in port_status, "port can not up after restart")
 
-    def port_restart(self):
-        self.vhost.send_expect("stop", "testpmd> ", 120)
-        self.vhost.send_expect("port stop 0", "testpmd> ", 120)
-        self.check_port_throughput_after_port_stop()
-        self.vhost.send_expect("clear port stats all", "testpmd> ", 120)
-        self.vhost.send_expect("port start all", "testpmd> ", 120)
+    def port_restart(self, restart_times=1):
+        if restart_times == 1:
+            self.vhost.send_expect("stop", "testpmd> ", 120)
+            self.vhost.send_expect("port stop 0", "testpmd> ", 120)
+            self.check_port_throughput_after_port_stop()
+            self.vhost.send_expect("clear port stats all", "testpmd> ", 120)
+            self.vhost.send_expect("port start all", "testpmd> ", 120)
+        else:
+            for i in range(restart_times):
+                self.vhost.send_expect("stop", "testpmd> ", 120)
+                self.vhost.send_expect("port stop 0", "testpmd> ", 120)
+                self.vhost.send_expect("clear port stats all", "testpmd> ", 120)
+                self.vhost.send_expect("port start all", "testpmd> ", 120)
         self.check_port_link_status_after_port_restart()
         self.vhost.send_expect("set burst 1", "testpmd> ", 120)
         self.vhost.send_expect("start tx_first 1", "testpmd> ", 120)
@@ -156,7 +162,7 @@ class TestLoopbackPortRestart(TestCase):
         self.verify(Mpps > 0, "%s can not receive packets" % self.running_case)
         return Mpps
 
-    def send_and_verify(self, case_info, frame_size):
+    def send_and_verify(self, case_info, frame_size, restart_times=1):
         """
         start to send packets and calculate the average throughput
         """
@@ -166,7 +172,7 @@ class TestLoopbackPortRestart(TestCase):
         Mpps = self.calculate_avg_throughput()
         self.update_table_info(case_info, frame_size, Mpps, "Before Restart")
 
-        self.port_restart()
+        self.port_restart(restart_times)
         Mpps = self.calculate_avg_throughput()
         self.update_table_info(case_info, frame_size, Mpps, "After Restart and set burst to 1")
 
@@ -184,7 +190,7 @@ class TestLoopbackPortRestart(TestCase):
         self.dut.close_session(self.vhost)
         self.dut.close_session(self.virtio_user)
 
-    def test_vhost_loopback_virtio11_mergeable_mac(self):
+    def test_loopback_test_with_packed_ring_mergeable_path(self):
         """
         performance for [frame_sizes] and restart port on virtio1.1 mergeable path
         """
@@ -193,11 +199,11 @@ class TestLoopbackPortRestart(TestCase):
         for frame_size in self.frame_sizes:
             self.start_vhost_testpmd()
             self.start_virtio_user_testpmd(pmd_arg)
-            self.send_and_verify("virtio1.1 mergeable", frame_size)
+            self.send_and_verify("packed ring mergeable", frame_size)
             self.close_all_testpmd()
         self.result_table_print()
 
-    def test_vhost_loopback_virtio11_normal_mac(self):
+    def test_loopback_test_with_packed_ring_nonmergeable_path(self):
         """
         performance for [frame_sizes] and restart port ob virtio1.1 normal path
         """
@@ -206,63 +212,86 @@ class TestLoopbackPortRestart(TestCase):
         for frame_size in self.frame_sizes:
             self.start_vhost_testpmd()
             self.start_virtio_user_testpmd(pmd_arg)
-            self.send_and_verify("virtio1.1 normal", frame_size)
+            self.send_and_verify("packed ring non-mergeable", frame_size)
+            self.close_all_testpmd()
+        self.result_table_print()
+
+    def test_loopback_test_with_packed_ring_inorder_mergeable_path(self):
+        pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=1,in_order=1",
+                   "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
+        for frame_size in self.frame_sizes:
+            self.start_vhost_testpmd()
+            self.start_virtio_user_testpmd(pmd_arg)
+            self.send_and_verify("packed ring inorder mergeable", frame_size)
             self.close_all_testpmd()
         self.result_table_print()
 
-    def test_vhost_loopback_virtiouser_inorder_mergeable_mac(self):
+    def test_loopback_test_with_packed_ring_inorder_nonmergeable_path(self):
         """
         performance for [frame_sizes] and restart port on inorder mergeable path
         """
-        pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1 ",
-                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
+        pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=0,in_order=1",
+                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
         for frame_size in self.frame_sizes:
             self.start_vhost_testpmd()
             self.start_virtio_user_testpmd(pmd_arg)
-            self.send_and_verify("inorder mergeable", frame_size)
+            self.send_and_verify("packed ring inorder non-mergeable", frame_size)
             self.close_all_testpmd()
         self.result_table_print()
 
-    def test_vhost_loopback_virtiouser_inorder_mergeable_off_mac(self):
+    def test_loopback_test_with_split_ring_inorder_mergeable_path(self):
         """
         performance for [frame_sizes] and restart port on inorder normal path
         """
+        pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
+                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
+        for frame_size in self.frame_sizes:
+            self.start_vhost_testpmd()
+            self.start_virtio_user_testpmd(pmd_arg)
+            self.send_and_verify("split ring inorder mergeable", frame_size)
+            self.close_all_testpmd()
+        self.result_table_print()
+
+    def test_loopback_test_with_split_ring_inorder_nonmergeable_path(self):
+        """
+        performance for [frame_sizes] and restart port on split ring inorder non-mergeable path
+        """
         pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0 ",
                           "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
         for frame_size in self.frame_sizes:
             self.start_vhost_testpmd()
             self.start_virtio_user_testpmd(pmd_arg)
-            self.send_and_verify("inorder normal", frame_size)
+            self.send_and_verify("split ring inorder non-mergeable", frame_size)
             self.close_all_testpmd()
         self.result_table_print()
 
-    def test_vhost_loopback_virtiouser_mergeable_mac(self):
+    def test_loopback_test_with_split_ring_mergeable_path(self):
         """
-        performance for [frame_sizes] and restart port on virtio mergeable path
+        performance for [frame_sizes] and restart port on split ring mergeable path
         """
-        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1 ",
+        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
                           "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
         for frame_size in self.frame_sizes:
             self.start_vhost_testpmd()
             self.start_virtio_user_testpmd(pmd_arg)
-            self.send_and_verify("virtiouser mergeable", frame_size)
+            self.send_and_verify("split ring mergeable", frame_size, restart_times=100)
             self.close_all_testpmd()
         self.result_table_print()
 
-    def test_vhost_loopback_virtiouser_normal_mac(self):
+    def test_loopback_test_with_split_ring_nonmergeable_path(self):
         """
         performance for [frame_sizes] and restart port on virtio normal path
         """
-        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0 ",
+        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
                           "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
         for frame_size in self.frame_sizes:
             self.start_vhost_testpmd()
             self.start_virtio_user_testpmd(pmd_arg)
-            self.send_and_verify("virtiouser normal", frame_size)
+            self.send_and_verify("split ring non-mergeable", frame_size)
             self.close_all_testpmd()
         self.result_table_print()
 
-    def test_vhost_loopback_virtiouser_vector_rx_mac(self):
+    def test_loopback_test_with_split_ring_vector_rx_path(self):
         """
         performance for frame_sizes and restart port on virtio vector rx
         """
@@ -271,7 +300,7 @@ class TestLoopbackPortRestart(TestCase):
         for frame_size in self.frame_sizes:
             self.start_vhost_testpmd()
             self.start_virtio_user_testpmd(pmd_arg)
-            self.send_and_verify("virtiouser vector_rx", frame_size)
+            self.send_and_verify("split ring vector_rx", frame_size)
             self.close_all_testpmd()
         self.result_table_print()
 
-- 
1.8.3.1
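
The restart_times branch added above repeats the same four testpmd commands that the single-restart branch already issues. A minimal sketch of the same flow written as one loop, assuming a DTS-style session object exposing send_expect(cmd, prompt, timeout) like the self.vhost session used in the suite (the helper name and the check_throughput callback are illustrative, not part of the patch):

    def restart_port(session, restart_times=1, check_throughput=None):
        """Stop and restart port 0 of a running testpmd restart_times times."""
        for _ in range(restart_times):
            session.send_expect("stop", "testpmd> ", 120)
            session.send_expect("port stop 0", "testpmd> ", 120)
            if restart_times == 1 and check_throughput is not None:
                # Zero-throughput check only for the single-restart case,
                # matching the behaviour of the patched port_restart().
                check_throughput()
            session.send_expect("clear port stats all", "testpmd> ", 120)
            session.send_expect("port start all", "testpmd> ", 120)

The link-status check, "set burst 1" and "start tx_first 1" steps would follow the loop exactly as in the patch.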



* [dts] [PATCH V1 02/11]loopback_multi_queues: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:23   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  2020-03-25  8:10 ` [dts] [PATCH V1 03/11]pvp_virtio_user_2M_hugepages: " Xiao Qimai
                   ` (11 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_loopback_multi_queues.py | 55 +++++++++++++++++++++++++++-----
 1 file changed, 47 insertions(+), 8 deletions(-)

diff --git a/tests/TestSuite_loopback_multi_queues.py b/tests/TestSuite_loopback_multi_queues.py
index 589bf06..174ce01 100644
--- a/tests/TestSuite_loopback_multi_queues.py
+++ b/tests/TestSuite_loopback_multi_queues.py
@@ -98,7 +98,7 @@ class TestLoopbackMultiQueues(TestCase):
         start testpmd on virtio
         """
         eal_param = self.dut.create_eal_parameters(cores=self.core_list_user, prefix='virtio', no_pci=True, vdevs=['net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,queues=%d,%s' % (self.queue_number, args["version"])])
-        command_line_user = self.dut.target + "/app/testpmd " + eal_param + " -- -i %s --rss-ip --nb-cores=%d --rxq=%d --txq=%d --txd=1024 --rxd=1024" % (args["path"], self.nb_cores, self.queue_number, self.queue_number)
+        command_line_user = self.dut.target + "/app/testpmd " + eal_param + " -- -i %s --nb-cores=%d --rxq=%d --txq=%d --txd=1024 --rxd=1024" % (args["path"], self.nb_cores, self.queue_number, self.queue_number)
         self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
         self.virtio_user.send_expect("set fwd mac", "testpmd> ", 120)
         self.virtio_user.send_expect("start", "testpmd> ", 120)
@@ -162,6 +162,7 @@ class TestLoopbackMultiQueues(TestCase):
             if self.queue_number > 1:
                 self.check_packets_of_each_queue(frame_size)
 
+
     def verify_liner_for_multi_queue(self):
         """
         verify the Mpps of 8 queues is eight times of 1 queue
@@ -191,7 +192,7 @@ class TestLoopbackMultiQueues(TestCase):
         performance for Vhost PVP virtio 1.1 Mergeable Path.
         """
         virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1",
-                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
+                            "path": ""}
         for i in self.verify_queue:
             self.nb_cores = i
             self.queue_number = i
@@ -209,7 +210,7 @@ class TestLoopbackMultiQueues(TestCase):
         performance for Vhost PVP virtio1.1 Normal Path.
         """
         virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0",
-                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
+                            "path": ""}
         for i in self.verify_queue:
             self.nb_cores = i
             self.queue_number = i
@@ -227,7 +228,7 @@ class TestLoopbackMultiQueues(TestCase):
         performance for Vhost PVP In_order mergeable Path.
         """
         virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
-                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
+                            "path": ""}
         for i in self.verify_queue:
             self.nb_cores = i
             self.queue_number = i
@@ -245,7 +246,7 @@ class TestLoopbackMultiQueues(TestCase):
         performance for Vhost PVP In_order no_mergeable Path.
         """
         virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0",
-                        "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
+                        "path": ""}
         for i in self.verify_queue:
             self.nb_cores = i
             self.queue_number = i
@@ -263,12 +264,14 @@ class TestLoopbackMultiQueues(TestCase):
         performance for Vhost PVP Mergeable Path.
         """
         virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
-                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
+                            "path": ""}
         for i in self.verify_queue:
             self.nb_cores = i
             self.queue_number = i
             self.get_core_mask()
             self.start_vhost_testpmd()
+            if self.queue_number == 8:
+                virtio_pmd_arg["path"] = "--enable-hw-vlan-strip"
             self.start_virtio_testpmd(virtio_pmd_arg)
             self.send_and_verify("virito mergeable")
             self.close_all_testpmd()
@@ -281,7 +284,7 @@ class TestLoopbackMultiQueues(TestCase):
         performance for Vhost PVP Normal Path.
         """
         virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
-                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
+                            "path": "--enable-hw-vlan-strip"}
         for i in self.verify_queue:
             self.nb_cores = i
             self.queue_number = i
@@ -299,7 +302,7 @@ class TestLoopbackMultiQueues(TestCase):
         performance for Vhost PVP Vector_RX Path
         """
         virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
-                            "path": "--tx-offloads=0x0"}
+                            "path": ""}
         for i in self.verify_queue:
             self.nb_cores = i
             self.queue_number = i
@@ -312,6 +315,42 @@ class TestLoopbackMultiQueues(TestCase):
         self.result_table_print()
         self.verify_liner_for_multi_queue()
 
+    def test_loopback_with_virtio11_inorder_mergeable_path_multi_queue(self):
+        """
+        performance for Vhost PVP virtio 1.1 inorder mergeable path
+        """
+        virtio_pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=1,in_order=1",
+                            "path": ""}
+        for i in self.verify_queue:
+            self.nb_cores = i
+            self.queue_number = i
+            self.get_core_mask()
+            self.start_vhost_testpmd()
+            self.start_virtio_testpmd(virtio_pmd_arg)
+            self.send_and_verify("virtio 1.1 inorder mergeable")
+            self.close_all_testpmd()
+
+        self.result_table_print()
+        self.verify_liner_for_multi_queue()
+
+    def test_loopback_with_virtio11_inorder_nonmergeable_path_multi_queue(self):
+        """
+        performance for Vhost PVP virtio 1.1 inorder non-mergeable path
+        """
+        virtio_pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=0,in_order=1",
+                            "path": ""}
+        for i in self.verify_queue:
+            self.nb_cores = i
+            self.queue_number = i
+            self.get_core_mask()
+            self.start_vhost_testpmd()
+            self.start_virtio_testpmd(virtio_pmd_arg)
+            self.send_and_verify("virtio 1.1 inorder non-mergeable")
+            self.close_all_testpmd()
+
+        self.result_table_print()
+        self.verify_liner_for_multi_queue()
+
     def tear_down(self):
         """
         Run after each test case.
-- 
1.8.3.1
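
The new multi-queue cases feed into verify_liner_for_multi_queue(), whose docstring above states that the 8-queue Mpps should be roughly eight times the 1-queue value. A self-contained sketch of that kind of scaling check (the 20% tolerance is an assumption for illustration, not the suite's actual threshold):

    def check_queue_scaling(mpps_by_queues, tolerance=0.2):
        """Return True if 8-queue throughput is within tolerance of 8x the 1-queue value."""
        expected = mpps_by_queues[1] * 8
        return abs(mpps_by_queues[8] - expected) <= tolerance * expected

    # Example: 5.0 Mpps with 1 queue and 37.5 Mpps with 8 queues passes (within 20% of 40.0).
    print(check_queue_scaling({1: 5.0, 8: 37.5}))   # True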



* [dts] [PATCH V1 03/11]pvp_virtio_user_2M_hugepages: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
  2020-03-25  8:10 ` [dts] [PATCH V1 02/11]loopback_multi_queues: " Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:23   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  2020-03-25  8:10 ` [dts] [PATCH V1 04/11]pvp_virtio_user_4k_pages: " Xiao Qimai
                   ` (10 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_pvp_virtio_user_2M_hugepages.py | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/tests/TestSuite_pvp_virtio_user_2M_hugepages.py b/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
index 9d2eaed..ac7187c 100644
--- a/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
+++ b/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
@@ -128,20 +128,20 @@ class TestPVPVirtioWith2Mhuge(TestCase):
         start testpmd on vhost
         """
         testcmd = self.dut.target + "/app/testpmd "
-        vdev = [r"'net_vhost0,iface=vhost-net,queues=1'"]
+        vdev = ["net_vhost0,iface=vhost-net,queues=1"]
         eal_params = self.dut.create_eal_parameters(cores=self.core_list_vhost_user, prefix='vhost', ports=[self.pci_info], vdevs=vdev)
         command_line_client = testcmd + eal_params + " -- -i"
         self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
         self.vhost_user.send_expect("start", "testpmd> ", 120)
 
-    def start_testpmd_as_virtio(self):
+    def start_testpmd_as_virtio(self, packed=False):
         """
         start testpmd on virtio
         """
         testcmd = self.dut.target + "/app/testpmd "
-        vdev = " --single-file-segments --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1 -- -i"
-        eal_params = self.dut.create_eal_parameters(cores=self.core_list_virtio_user, no_pci=True, prefix='virtio-user', ports=[self.pci_info])
-        command_line_user = testcmd + eal_params + vdev
+        vdev = 'net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1' if not packed else 'net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1,packed_vq=1'
+        eal_params = self.dut.create_eal_parameters(cores=self.core_list_virtio_user, no_pci=True, prefix='virtio-user', ports=[self.pci_info], vdevs=[vdev])
+        command_line_user = testcmd + eal_params + ' --single-file-segments -- -i'
         self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
         self.virtio_user.send_expect("start", "testpmd> ", 120)
 
@@ -154,7 +154,7 @@ class TestPVPVirtioWith2Mhuge(TestCase):
         self.dut.close_session(self.vhost_user)
         self.dut.close_session(self.virtio_user)
 
-    def test_perf_pvp_virtio_user_with_2M_hugepages(self):
+    def test_perf_pvp_virtio_user_split_ring_2M_hugepages(self):
         """
         Basic test for virtio-user 2M hugepage
         """
@@ -164,6 +164,16 @@ class TestPVPVirtioWith2Mhuge(TestCase):
         self.result_table_print()
         self.close_all_apps()
 
+    def test_perf_pvp_virtio_user_packed_ring_2M_hugepages(self):
+        """
+        Basic test for virtio-user 2M hugepage
+        """
+        self.start_testpmd_as_vhost()
+        self.start_testpmd_as_virtio(packed=True)
+        self.send_and_verify()
+        self.result_table_print()
+        self.close_all_apps()
+
     def tear_down(self):
         """
         Run after each test case.
-- 
1.8.3.1
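
The only difference between the split ring and packed ring cases above is the packed_vq=1 argument on the virtio-user vdev. A small illustration of how that string is formed (the helper is hypothetical; the suite itself passes the string to self.dut.create_eal_parameters(vdevs=[...])):

    def build_virtio_user_vdev(mac, sock_path, queues=1, packed=False):
        """Return the net_virtio_user0 vdev string for a split or packed ring run."""
        vdev = "net_virtio_user0,mac=%s,path=%s,queues=%d" % (mac, sock_path, queues)
        if packed:
            vdev += ",packed_vq=1"
        return vdev

    print(build_virtio_user_vdev("00:11:22:33:44:10", "./vhost-net"))
    # net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1
    print(build_virtio_user_vdev("00:11:22:33:44:10", "./vhost-net", packed=True))
    # net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1,packed_vq=1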



* [dts] [PATCH V1 04/11]pvp_virtio_user_4k_pages: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
  2020-03-25  8:10 ` [dts] [PATCH V1 02/11]loopback_multi_queues: " Xiao Qimai
  2020-03-25  8:10 ` [dts] [PATCH V1 03/11]pvp_virtio_user_2M_hugepages: " Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:20   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  2020-03-25  8:10 ` [dts] [PATCH V1 05/11]vhost_enqueue_interrupt: " Xiao Qimai
                   ` (9 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_pvp_virtio_user_4k_pages.py | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/tests/TestSuite_pvp_virtio_user_4k_pages.py b/tests/TestSuite_pvp_virtio_user_4k_pages.py
index 5e6f657..532b6c1 100644
--- a/tests/TestSuite_pvp_virtio_user_4k_pages.py
+++ b/tests/TestSuite_pvp_virtio_user_4k_pages.py
@@ -130,21 +130,21 @@ class TestPvpVirtioUser4kPages(TestCase):
         Start testpmd on vhost
         """
         testcmd = self.dut.target + "/app/testpmd "
-        vdev = " -m 1024 --no-huge --vdev 'net_vhost0,iface=vhost-net,queues=1'"
+        vdev = 'net_vhost0,iface=vhost-net,queues=1'
         para = " -- -i --no-numa --socket-num=%d" % self.ports_socket
-        eal_params = self.dut.create_eal_parameters(cores=self.core_list_vhost_user, prefix='vhost', ports=[self.pci_info])
-        command_line_client = testcmd + eal_params + vdev + para
+        eal_params = self.dut.create_eal_parameters(cores=self.core_list_vhost_user, prefix='vhost', ports=[self.pci_info], vdevs=[vdev])
+        command_line_client = testcmd + eal_params + ' -m 1024 --no-huge' + para
         self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
         self.vhost_user.send_expect("start", "testpmd> ", 120)
 
-    def start_testpmd_as_virtio(self):
+    def start_testpmd_as_virtio(self, packed=False):
         """
         Start testpmd on virtio
         """
         testcmd = self.dut.target + "/app/testpmd "
-        vdev = " --no-huge -m 1024 --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1 -- -i"
-        eal_params = self.dut.create_eal_parameters(cores=self.core_list_virtio_user, prefix='virtio-user', ports=[self.pci_info])
-        command_line_user = testcmd + eal_params + vdev
+        vdev = "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1" if not packed else "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,packed_vq=1,queues=1"
+        eal_params = self.dut.create_eal_parameters(cores=self.core_list_virtio_user, prefix='virtio-user', ports=[self.pci_info], vdevs=[vdev])
+        command_line_user = testcmd + eal_params + ' --no-huge -m 1024 -- -i'
         self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
         self.virtio_user.send_expect("set fwd mac", "testpmd> ", 120)
         self.virtio_user.send_expect("start", "testpmd> ", 120)
@@ -168,7 +168,7 @@ class TestPvpVirtioUser4kPages(TestCase):
         self.dut.close_session(self.vhost_user)
         self.dut.close_session(self.virtio_user)
 
-    def test_perf_pvp_virtio_user_with_4K_pages(self):
+    def test_perf_pvp_virtio_user_split_ring_with_4K_pages(self):
         """
         Basic test for virtio-user 4K pages
         """
@@ -179,6 +179,18 @@ class TestPvpVirtioUser4kPages(TestCase):
         self.result_table_print()
         self.close_all_apps()
 
+    def test_perf_pvp_virtio_user_packed_ring_with_4K_pages(self):
+        """
+        Basic test for virtio-user 4K pages
+        """
+        self.start_testpmd_as_vhost()
+        self.prepare_tmpfs_for_4k()
+        self.start_testpmd_as_virtio(packed=True)
+        self.send_and_verify()
+        self.result_table_print()
+        self.close_all_apps()
+
+
     def tear_down(self):
         """
         Run after each test case.
-- 
1.8.3.1



* [dts] [PATCH V1 05/11]vhost_enqueue_interrupt: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (2 preceding siblings ...)
  2020-03-25  8:10 ` [dts] [PATCH V1 04/11]pvp_virtio_user_4k_pages: " Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  2020-03-25  8:10 ` [dts] [PATCH V1 06/11]vhost_event_idx_interrupt: " Xiao Qimai
                   ` (8 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_vhost_enqueue_interrupt.py | 36 +++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/tests/TestSuite_vhost_enqueue_interrupt.py b/tests/TestSuite_vhost_enqueue_interrupt.py
index 509c7f3..9d0e024 100644
--- a/tests/TestSuite_vhost_enqueue_interrupt.py
+++ b/tests/TestSuite_vhost_enqueue_interrupt.py
@@ -83,14 +83,14 @@ class TestVhostEnqueueInterrupt(TestCase):
         self.core_list_virtio = core_list[0: self.queues+1]
         self.core_list_l3fwd = core_list[self.queues+1: need_num]
 
-    def lanuch_virtio_user(self):
+    def lanuch_virtio_user(self, packed=False):
         """
         launch virtio-user with server mode
         """
-        vdev = "--vdev=net_virtio_user0,mac=%s,path=./vhost-net,server=1,queues=%d" % (self.vmac, self.queues)
-        eal_params = self.dut.create_eal_parameters(cores=self.core_list_virtio, prefix='virtio', no_pci=True, ports=[self.pci_info])
+        vdev = "net_virtio_user0,mac=%s,path=./vhost-net,server=1,queues=%d" % (self.vmac, self.queues) if not packed else "net_virtio_user0,mac=%s,path=./vhost-net,server=1,queues=%d,packed_vq=1" % (self.vmac, self.queues)
+        eal_params = self.dut.create_eal_parameters(cores=self.core_list_virtio, prefix='virtio', no_pci=True, ports=[self.pci_info], vdevs=[vdev])
         para = " -- -i --rxq=%d --txq=%d --rss-ip" % (self.queues, self.queues)
-        command_line_client =  self.dut.target + "/app/testpmd " + eal_params + vdev + para
+        command_line_client =  self.dut.target + "/app/testpmd " + eal_params + para
         self.virtio_user.send_expect(command_line_client, "testpmd> ", 120)
         self.virtio_user.send_expect("set fwd txonly", "testpmd> ", 20)
 
@@ -109,9 +109,9 @@ class TestVhostEnqueueInterrupt(TestCase):
             self.verify_info.append(info)
 
         example_cmd = "./examples/l3fwd-power/build/l3fwd-power "
-        vdev = [r"'net_vhost0,iface=vhost-net,queues=%d,client=1'" % self.queues]
+        vdev = 'net_vhost0,iface=vhost-net,queues=%d,client=1' % self.queues
         para = " -- -p 0x1 --parse-ptype 1 --config '%s' " % config_info
-        eal_params = self.dut.create_eal_parameters(cores=self.core_list_l3fwd, no_pci=True, ports=[self.pci_info], vdevs=vdev)
+        eal_params = self.dut.create_eal_parameters(cores=self.core_list_l3fwd, no_pci=True, ports=[self.pci_info], vdevs=[vdev])
         command_line_client = example_cmd + eal_params + para
         self.vhost.get_session_before(timeout=2)
         self.vhost.send_expect(command_line_client, "POWER", 40)
@@ -156,7 +156,7 @@ class TestVhostEnqueueInterrupt(TestCase):
         self.dut.close_session(self.vhost)
         self.dut.close_session(self.virtio_user)
 
-    def test_virtio_user_interrupt(self):
+    def test_wake_up_split_ring_vhost_user_core_with_l3fwd_power_sample(self):
         """
         Check the virtio-user interrupt can work when use vhost-net as backend
         """
@@ -166,7 +166,7 @@ class TestVhostEnqueueInterrupt(TestCase):
         self.lanuch_l3fwd_power()
         self.send_and_verify()
 
-    def test_virtio_user_interrupt_with_multi_queue(self):
+    def test_wake_up_split_ring_vhost_user_core_with_l3fwd_power_sample_when_multi_queues_enabled(self):
         """
         Check the virtio-user interrupt can work with multi queue
         """
@@ -176,6 +176,26 @@ class TestVhostEnqueueInterrupt(TestCase):
         self.lanuch_l3fwd_power()
         self.send_and_verify()
 
+    def test_wake_up_packed_ring_vhost_user_core_with_l3fwd_power_sample(self):
+        """
+        Check the virtio-user interrupt can work when use vhost-net as backend
+        """
+        self.queues = 1
+        self.get_core_list()
+        self.lanuch_virtio_user(packed=True)
+        self.lanuch_l3fwd_power()
+        self.send_and_verify()
+
+    def test_wake_up_packed_ring_vhost_user_core_with_l3fwd_power_sample_when_multi_queues_enabled(self):
+        """
+        Check the virtio-user interrupt can work with multi queue
+        """
+        self.queues = 4
+        self.get_core_list()
+        self.lanuch_virtio_user(packed=True)
+        self.lanuch_l3fwd_power()
+        self.send_and_verify()
+
     def tear_down(self):
         """
         Run after each test case.
-- 
1.8.3.1
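
lanuch_l3fwd_power() above passes a config_info string (built outside this hunk) to l3fwd-power's --config option, one "(port,queue,lcore)" tuple per queue. A self-contained sketch of what that construction typically looks like for the single-port, multi-queue setup used here (helper name and core values are illustrative):

    def build_l3fwd_power_config(queues, lcores):
        """Build the --config value: one (port,queue,lcore) entry per queue on port 0."""
        entries = ["(0,%d,%s)" % (q, lcores[q]) for q in range(queues)]
        return ",".join(entries)

    print(build_l3fwd_power_config(4, ["29", "30", "31", "32"]))
    # (0,0,29),(0,1,30),(0,2,31),(0,3,32)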



* [dts] [PATCH V1 06/11]vhost_event_idx_interrupt: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (3 preceding siblings ...)
  2020-03-25  8:10 ` [dts] [PATCH V1 05/11]vhost_enqueue_interrupt: " Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:21   ` Wang, Yinan
  2020-03-31  2:59   ` Xiao, QimaiX
  2020-03-25  8:10 ` [dts] [PATCH V1 07/11]vhost_user_live_migration: " Xiao Qimai
                   ` (7 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_vhost_event_idx_interrupt.py | 52 +++++++++++++++++++++++++---
 1 file changed, 47 insertions(+), 5 deletions(-)

diff --git a/tests/TestSuite_vhost_event_idx_interrupt.py b/tests/TestSuite_vhost_event_idx_interrupt.py
index fb7da60..5a9f932 100644
--- a/tests/TestSuite_vhost_event_idx_interrupt.py
+++ b/tests/TestSuite_vhost_event_idx_interrupt.py
@@ -161,7 +161,7 @@ class TestVhostEventIdxInterrupt(TestCase):
                     'This qemu version should greater than 2.7 ' + \
                     'in this suite, please config it in vhost_sample.cfg file')
 
-    def start_vms(self, vm_num=1):
+    def start_vms(self, vm_num=1, packed=False):
         """
         start qemus
         """
@@ -178,6 +178,8 @@ class TestVhostEventIdxInterrupt(TestCase):
                 opt_args = "csum=on,mq=on,vectors=%d" % (2*self.queues + 2)
             else:
                 opt_args = "csum=on"
+            if packed:
+                opt_args = opt_args + ',packed=on'
             vm_params['opt_settings'] = opt_args
             vm_info.set_vm_device(**vm_params)
             self.set_vm_cpu_number(vm_info)
@@ -250,9 +252,9 @@ class TestVhostEventIdxInterrupt(TestCase):
         """
         for i in range(len(self.vm)):
             self.vm[i].stop()
-        self.vhost.send_expect("^c", "#", 10)
+        self.dut.send_expect("killall l3fwd-power", "#", timeout=2)
 
-    def test_vhost_idx_interrupt(self):
+    def test_wake_up_split_ring_vhost_user_core_with_event_idx_interrupt(self):
         """
         wake up vhost-user core with l3fwd-power sample
         """
@@ -265,7 +267,7 @@ class TestVhostEventIdxInterrupt(TestCase):
         self.send_and_verify()
         self.stop_all_apps()
 
-    def test_vhost_idx_interrupt_with_multi_queue(self):
+    def test_wake_up_split_ring_vhost_user_cores_with_event_idx_interrupt_mode_16_queues(self):
         """
         wake up vhost-user core with l3fwd-power sample when multi queues are enabled
         """
@@ -279,7 +281,7 @@ class TestVhostEventIdxInterrupt(TestCase):
         self.send_and_verify()
         self.stop_all_apps()
 
-    def test_vhost_idx_interrupt_with_multi_vms(self):
+    def test_wake_up_split_ring_vhost_user_cores_by_multi_virtio_net_in_vms_with_event_idx_interrupt(self):
         """
         wake up vhost-user cores with l3fwd-power sample and multi VMs
         """
@@ -292,6 +294,46 @@ class TestVhostEventIdxInterrupt(TestCase):
         self.send_and_verify()
         self.stop_all_apps()
 
+    def test_wake_up_packed_ring_vhost_user_core_with_event_idx_interrupt(self):
+        """
+        wake up vhost-user core with l3fwd-power sample
+        """
+        self.vm_num = 1
+        self.queues = 1
+        self.get_core_mask()
+        self.lanuch_l3fwd_power()
+        self.start_vms(vm_num=self.vm_num, packed=True)
+        self.relanuch_l3fwd_power()
+        self.send_and_verify()
+        self.stop_all_apps()
+
+    def test_wake_up_packed_ring_vhost_user_cores_with_event_idx_interrupt_mode_16_queues(self):
+        """
+        wake up vhost-user core with l3fwd-power sample when multi queues are enabled
+        """
+        self.vm_num = 1
+        self.queues = 16
+        self.get_core_mask()
+        self.lanuch_l3fwd_power()
+        self.start_vms(vm_num=self.vm_num, packed=True)
+        self.relanuch_l3fwd_power()
+        self.config_virito_net_in_vm()
+        self.send_and_verify()
+        self.stop_all_apps()
+
+    def test_wake_up_packed_ring_vhost_user_cores_by_multi_virtio_net_in_vms_with_event_idx_interrupt(self):
+        """
+        wake up vhost-user cores with l3fwd-power sample and multi VMs
+        """
+        self.vm_num = 2
+        self.queues = 1
+        self.get_core_mask()
+        self.lanuch_l3fwd_power()
+        self.start_vms(vm_num=self.vm_num, packed=True)
+        self.relanuch_l3fwd_power()
+        self.send_and_verify()
+        self.stop_all_apps()
+
     def tear_down(self):
         """
         Run after each test case.
-- 
1.8.3.1
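
The packed ring variants only differ from the split ring ones in the qemu device options: start_vms() now appends packed=on to opt_settings. An illustrative reconstruction of how that option string ends up (the wrapper function is hypothetical; the option fragments mirror the diff above):

    def build_vm_opt_settings(queues=1, packed=False):
        """Assemble the virtio-net opt_settings string used by start_vms()."""
        if queues > 1:
            opt_args = "csum=on,mq=on,vectors=%d" % (2 * queues + 2)
        else:
            opt_args = "csum=on"
        if packed:
            opt_args += ",packed=on"
        return opt_args

    print(build_vm_opt_settings(queues=16, packed=True))
    # csum=on,mq=on,vectors=34,packed=on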



* [dts] [PATCH V1 07/11]vhost_user_live_migration: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (4 preceding siblings ...)
  2020-03-25  8:10 ` [dts] [PATCH V1 06/11]vhost_event_idx_interrupt: " Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  2:59   ` Xiao, QimaiX
  2020-03-25  8:10 ` [dts] [PATCH V1 08/11]vhost_virtio_pmd_interrupt: " Xiao Qimai
                   ` (6 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_vhost_user_live_migration.py | 71 +++++++++++++++++++++++++---
 1 file changed, 65 insertions(+), 6 deletions(-)

diff --git a/tests/TestSuite_vhost_user_live_migration.py b/tests/TestSuite_vhost_user_live_migration.py
index 5c5db0d..cb6789c 100644
--- a/tests/TestSuite_vhost_user_live_migration.py
+++ b/tests/TestSuite_vhost_user_live_migration.py
@@ -151,7 +151,7 @@ class TestVhostUserLiveMigration(TestCase):
         if zero_copy is True:
             zero_copy_str = ',dequeue-zero-copy=1'
         testcmd = self.dut.target + "/app/testpmd "
-        vdev = [r"'eth_vhost0,iface=%s/vhost-net,queues=%d%s'" % (self.base_dir, self.queue_number, zero_copy_str)]
+        vdev = ['eth_vhost0,iface=%s/vhost-net,queues=%d%s' % (self.base_dir, self.queue_number, zero_copy_str)]
         para = " -- -i --nb-cores=%d --rxq=%d --txq=%d" % (self.queue_number, self.queue_number, self.queue_number)
         eal_params_first = self.dut.create_eal_parameters(cores=self.core_list0, prefix='vhost', ports=[self.host_pci_info], vdevs=vdev)
         eal_params_secondary = self.dut.create_eal_parameters(cores=self.core_list1, prefix='vhost', ports=[self.backup_pci_info], vdevs=vdev)
@@ -166,7 +166,7 @@ class TestVhostUserLiveMigration(TestCase):
         self.backup_dut.send_expect('set fwd %s' % fwd_mode, 'testpmd> ', 30)
         self.backup_dut.send_expect('start', 'testpmd> ', 30)
 
-    def setup_vm_env_on_both_dut(self, driver='default'):
+    def setup_vm_env_on_both_dut(self, driver='default', packed=False):
         """
         Create testing environment on Host and Backup
         """
@@ -183,6 +183,8 @@ class TestVhostUserLiveMigration(TestCase):
             if self.queue_number > 1:
                 vhost_params['opt_queue'] = self.queue_number
                 opt_params = 'mrg_rxbuf=on,mq=on,vectors=%d' % (2*self.queue_number + 2)
+            if packed:
+                opt_params = opt_params + ',packed=on'
             vhost_params['opt_settings'] = opt_params
             self.host_vm.set_vm_device(**vhost_params)
 
@@ -385,7 +387,7 @@ class TestVhostUserLiveMigration(TestCase):
         # make sure still can receive packets
         verify_fun(self.vm_dut_backup)
 
-    def test_migrate_with_virtio_net(self):
+    def test_migrate_with_split_ring_virtio_net(self):
         """
         Verify migrate virtIO device from host to backup host,
         Verify before/in/after migration, device with kernel driver can receive packets
@@ -402,7 +404,7 @@ class TestVhostUserLiveMigration(TestCase):
 
         self.send_and_verify(self.verify_kernel)
 
-    def test_migrete_with_vritio_net_with_multi_queue(self):
+    def test_adjust_split_ring_virtio_net_queue_numbers_while_migreting_with_virtio_net(self):
         self.queue_number = 4
         self.launch_testpmd_as_vhost_on_both_dut()
         self.start_testpmd_with_fwd_mode_on_both_dut()
@@ -414,7 +416,7 @@ class TestVhostUserLiveMigration(TestCase):
 
         self.send_and_verify(self.verify_kernel, True)
 
-    def test_migrate_with_virtio_pmd(self):
+    def test_migrate_with_split_ring_virtio_pmd(self):
         self.queue_number = 1
         self.launch_testpmd_as_vhost_on_both_dut()
         self.start_testpmd_with_fwd_mode_on_both_dut()
@@ -426,7 +428,7 @@ class TestVhostUserLiveMigration(TestCase):
 
         self.send_and_verify(self.verify_dpdk)
 
-    def test_migrate_with_zero_copy_virtio_pmd(self):
+    def test_migrate_with_split_ring_virtio_pmd_zero_copy(self):
         self.queue_number = 1
         zero_copy = True
         # start testpmd and qemu on dut
@@ -442,6 +444,63 @@ class TestVhostUserLiveMigration(TestCase):
 
         self.send_and_verify(self.verify_dpdk)
 
+    def test_migrate_with_packed_ring_virtio_pmd(self):
+        self.queue_number = 1
+        self.launch_testpmd_as_vhost_on_both_dut()
+        self.start_testpmd_with_fwd_mode_on_both_dut()
+        self.setup_vm_env_on_both_dut(packed=True)
+
+        # bind virtio-net to igb_uio
+        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
+        self.start_testpmd_on_vm(self.vm_dut_host)
+
+        self.send_and_verify(self.verify_dpdk)
+
+    def test_migrate_with_packed_ring_virtio_pmd_zero_copy(self):
+        self.queue_number = 1
+        zero_copy = True
+        # start testpmd and qemu on dut
+        # after qemu start ok, then send 'start' command to testpmd
+        # if send 'start' command before start qemu, maybe qemu will start failed
+        self.launch_testpmd_as_vhost_on_both_dut(zero_copy)
+        self.setup_vm_env_on_both_dut(packed=True)
+        self.start_testpmd_with_fwd_mode_on_both_dut()
+
+        # bind virtio-net to igb_uio
+        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
+        self.start_testpmd_on_vm(self.vm_dut_host)
+
+        self.send_and_verify(self.verify_dpdk)
+
+    def test_migrate_with_packed_ring_virtio_net(self):
+        """
+        Verify migrate virtIO device from host to backup host,
+        Verify before/in/after migration, device with kernel driver can receive packets
+        """
+        self.queue_number = 1
+        self.launch_testpmd_as_vhost_on_both_dut()
+        self.start_testpmd_with_fwd_mode_on_both_dut()
+        self.setup_vm_env_on_both_dut(packed=True)
+
+        # bind virtio-net back to virtio-pci
+        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
+        # start screen and tcpdump on vm
+        self.start_tcpdump_on_vm(self.vm_dut_host)
+
+        self.send_and_verify(self.verify_kernel)
+
+    def test_adjust_packed_ring_virtio_net_queue_numbers_while_migreting_with_virtio_net(self):
+        self.queue_number = 4
+        self.launch_testpmd_as_vhost_on_both_dut()
+        self.start_testpmd_with_fwd_mode_on_both_dut()
+        self.setup_vm_env_on_both_dut(packed=True)
+
+        # bind virtio-net back to virtio-pci
+        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
+        self.start_tcpdump_on_vm(self.vm_dut_host)
+
+        self.send_and_verify(self.verify_kernel, True)
+
     def tear_down(self):
         self.destroy_vm_env()
         # stop send packet on tester
-- 
1.8.3.1
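
The vhost side of these migration cases is built from the eth_vhost0 vdev shown at the top of the diff, with dequeue-zero-copy=1 appended only for the zero-copy cases. A short sketch of that string construction (hypothetical helper; path and queue values are examples):

    def build_vhost_vdev(base_dir, queues=1, zero_copy=False):
        """Return the eth_vhost0 vdev string used on both the host and backup DUT."""
        zero_copy_str = ",dequeue-zero-copy=1" if zero_copy else ""
        return "eth_vhost0,iface=%s/vhost-net,queues=%d%s" % (base_dir, queues, zero_copy_str)

    print(build_vhost_vdev("/root/dpdk", queues=4))
    # eth_vhost0,iface=/root/dpdk/vhost-net,queues=4
    print(build_vhost_vdev("/root/dpdk", zero_copy=True))
    # eth_vhost0,iface=/root/dpdk/vhost-net,queues=1,dequeue-zero-copy=1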



* [dts] [PATCH V1 08/11]vhost_virtio_pmd_interrupt: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (5 preceding siblings ...)
  2020-03-25  8:10 ` [dts] [PATCH V1 07/11]vhost_user_live_migration: " Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  2020-03-25  8:10 ` [dts] [PATCH V1 09/11]vhost_virtio_user_interrupt: " Xiao Qimai
                   ` (5 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_vhost_virtio_pmd_interrupt.py | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/tests/TestSuite_vhost_virtio_pmd_interrupt.py b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
index 5e6437e..ca69932 100644
--- a/tests/TestSuite_vhost_virtio_pmd_interrupt.py
+++ b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
@@ -117,7 +117,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
         # get the core list depend on current nb_cores number
         self.get_core_list()
         testcmd = self.dut.target + "/app/testpmd "
-        vdev = [r"'net_vhost0,iface=%s/vhost-net,queues=%d'" % (self.base_dir, self.queues)]
+        vdev = ['net_vhost0,iface=%s/vhost-net,queues=%d' % (self.base_dir, self.queues)]
         eal_params = self.dut.create_eal_parameters(cores=self.core_list, ports=[self.pci_info], vdevs=vdev)
         para = " -- -i --nb-cores=%d --rxq=%d --txq=%d --rss-ip" % (self.nb_cores, self.queues, self.queues)
         command_line_client = testcmd + eal_params + para
@@ -166,7 +166,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
             if list(self.vm.params[i].keys())[0] == 'cpu':
                 self.vm.params[i]['cpu'][0]['number'] = self.queues
 
-    def start_vms(self, mode=0):
+    def start_vms(self, mode=0, packed=False):
         """
         start qemus
         """
@@ -177,7 +177,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
         vm_params['opt_path'] = '%s/vhost-net' % self.base_dir
         vm_params['opt_mac'] = "00:11:22:33:44:55"
         vm_params['opt_queue'] = self.queues
-        opt_param = "mrg_rxbuf=on,csum=on,mq=on,vectors=%d" % (2*self.queues+2)
+        opt_param = "mrg_rxbuf=on,csum=on,mq=on,vectors=%d" % (2*self.queues+2) if not packed else "mrg_rxbuf=on,csum=on,mq=on,vectors=%d,packed=on" % (2*self.queues+2)
         if mode == 0:
             vm_params['opt_settings'] = "disable-modern=true," + opt_param
         elif mode == 1:
@@ -311,6 +311,19 @@ class TestVhostVirtioPmdInterrupt(TestCase):
         self.launch_l3fwd_power_in_vm()
         self.send_and_verify()
 
+    def test_perf_packed_ring_virtio_interrupt_with_16queues(self):
+        """
+        wake up virtio core with l3fwd-power sample when 16 queues and packed ring are enabled
+        """
+        self.queues = 16
+        self.nb_cores = 16
+        self.start_testpmd_on_vhost()
+        self.start_vms(mode=0, packed=True)
+        self.prepare_vm_env()
+        self.launch_l3fwd_power_in_vm()
+        self.send_and_verify()
+
+
     def tear_down(self):
         """
         Run after each test case.
-- 
1.8.3.1



* [dts] [PATCH V1 09/11]vhost_virtio_user_interrupt: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (6 preceding siblings ...)
  2020-03-25  8:10 ` [dts] [PATCH V1 08/11]vhost_virtio_pmd_interrupt: " Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  2020-03-25  8:10 ` [dts] [PATCH V1 10/11]virtio_event_idx_interrupt: " Xiao Qimai
                   ` (4 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_vhost_virtio_user_interrupt.py | 72 +++++++++++++++++++++-----
 1 file changed, 59 insertions(+), 13 deletions(-)

diff --git a/tests/TestSuite_vhost_virtio_user_interrupt.py b/tests/TestSuite_vhost_virtio_user_interrupt.py
index 610e56e..69d5668 100644
--- a/tests/TestSuite_vhost_virtio_user_interrupt.py
+++ b/tests/TestSuite_vhost_virtio_user_interrupt.py
@@ -89,13 +89,13 @@ class TestVirtioUserInterrupt(TestCase):
         out = self.dut.build_dpdk_apps("./examples/l3fwd-power")
         self.verify("Error" not in out, "compilation l3fwd-power error")
 
-    def launch_l3fwd(self, path):
+    def launch_l3fwd(self, path, packed=False):
         self.core_interrupt = self.core_list_l3fwd[0]
         example_para = "./examples/l3fwd-power/build/l3fwd-power "
-        vdev = " --log-level='user1,7' --vdev=virtio_user0,path=%s,cq=1 -- -p 1" % path
-        eal_params = self.dut.create_eal_parameters(cores=self.core_list_l3fwd, prefix='l3fwd-pwd', no_pci=True, ports=[self.pci_info])
+        vdev = "virtio_user0,path=%s,cq=1" % path if not packed else "virtio_user0,path=%s,cq=1,packed_vq=1" % path
+        eal_params = self.dut.create_eal_parameters(cores=self.core_list_l3fwd, prefix='l3fwd-pwd', no_pci=True, ports=[self.pci_info], vdevs=[vdev])
         para = " --config='(0,0,%s)' --parse-ptype" % self.core_interrupt
-        cmd_l3fwd = example_para + eal_params + vdev + para
+        cmd_l3fwd = example_para + eal_params + " --log-level='user1,7' -- -p 1 " + para
         self.l3fwd.get_session_before(timeout=2)
         self.l3fwd.send_expect(cmd_l3fwd, "POWER", 40)
         time.sleep(10)
@@ -110,27 +110,27 @@ class TestVirtioUserInterrupt(TestCase):
         start testpmd on vhost side
         """
         testcmd = self.dut.target + "/app/testpmd "
-        vdev = [r"'net_vhost0,iface=vhost-net,queues=1,client=0'"]
+        vdev = ["net_vhost0,iface=vhost-net,queues=1,client=0"]
         para = " -- -i --rxq=1 --txq=1"
         if len(pci) == 0:
             eal_params = self.dut.create_eal_parameters(cores=self.core_list_vhost, ports=[self.pci_info], vdevs=vdev)
         else:
-            eal_params = self.dut.create_eal_parameters(cores=self.core_list_vhost, prefix='vhost', no_pci=True, ports=[self.pci_info], vdevs=vdev)
+            eal_params = self.dut.create_eal_parameters(cores=self.core_list_vhost, prefix='vhost', no_pci=True, vdevs=vdev)
         cmd_vhost_user = testcmd + eal_params + para
 
         self.vhost.send_expect(cmd_vhost_user, "testpmd>", 30)
         self.vhost.send_expect("set fwd mac", "testpmd>", 30)
         self.vhost.send_expect("start", "testpmd>", 30)
 
-    def start_virtio_user(self):
+    def start_virtio_user(self, packed=False):
         """
         start testpmd on virtio side
         """
         testcmd = self.dut.target + "/app/testpmd "
-        vdev = " --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net"
-        eal_params = self.dut.create_eal_parameters(cores=self.core_list_l3fwd, prefix='virtio', no_pci=True, ports=[self.pci_info])
+        vdev = "net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net" if not packed else "net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,packed_vq=1"
+        eal_params = self.dut.create_eal_parameters(cores=self.core_list_l3fwd, prefix='virtio', no_pci=True, vdevs=[vdev])
         para = " -- -i --txd=512 --rxd=128 --tx-offloads=0x00"
-        cmd_virtio_user = testcmd + eal_params + vdev + para
+        cmd_virtio_user = testcmd + eal_params + para
         self.virtio.send_expect(cmd_virtio_user, "testpmd>", 120)
         self.virtio.send_expect("set fwd mac", "testpmd>", 20)
         self.virtio.send_expect("start", "testpmd>", 20)
@@ -154,7 +154,7 @@ class TestVirtioUserInterrupt(TestCase):
         else:
             self.logger.error("Wrong link status not right, status is %s" % result)
 
-    def test_interrupt_with_vhost_net_as_backed(self):
+    def test_split_ring_virtio_user_interrupt_with_vhost_net_as_backed(self):
         """
         Check the virtio-user interrupt can work when use vhost-net as backend
         """
@@ -175,7 +175,7 @@ class TestVirtioUserInterrupt(TestCase):
         self.check_interrupt_log(status="waked up")
         self.dut.send_expect("killall -s INT ping", "#")
 
-    def test_interrupt_with_vhost_user_as_backed(self):
+    def test_split_ring_virtio_user_interrupt_with_vhost_user_as_backed(self):
         """
         Check the virtio-user interrupt can work when use vhost-user as backend
         """
@@ -189,7 +189,7 @@ class TestVirtioUserInterrupt(TestCase):
             time.sleep(3)
             self.check_interrupt_log(status="waked up")
 
-    def test_lsc_event_between_vhost_and_virtio_user(self):
+    def test_lsc_event_between_vhost_user_and_virtio_user_with_split_ring(self):
         """
         LSC event between vhost-user and virtio-user
         """
@@ -200,6 +200,52 @@ class TestVirtioUserInterrupt(TestCase):
         self.vhost.send_expect("quit", "#", 20)
         self.check_virtio_side_link_status("down")
 
+    def test_packed_ring_virtio_user_interrupt_with_vhost_user_as_backed(self):
+        """
+        Check the virtio-user interrupt can work when use vhost-user as backend
+        """
+        self.start_vhost_testpmd(pci="")
+        self.launch_l3fwd(path="./vhost-net", packed=True)
+        # double check the status of interrupt core
+        for i in range(2):
+            self.tester.scapy_append('pk=[Ether(dst="52:54:00:00:00:01")/IP()/("X"*64)]')
+            self.tester.scapy_append('sendp(pk, iface="%s", count=100)' % self.tx_interface)
+            self.tester.scapy_execute()
+            time.sleep(3)
+            self.check_interrupt_log(status="waked up")
+
+    def test_packed_ring_virtio_user_interrupt_with_vhost_net_as_backed(self):
+        """
+        Check the virtio-user interrupt can work when use vhost-net as backend
+        """
+        self.launch_l3fwd(path="/dev/vhost-net", packed=True)
+        self.virtio.send_expect("ifconfig tap0 up", "#", 20)
+        self.virtio.send_expect("ifconfig tap0 1.1.1.2", "#", 20)
+        # start to ping, check the status of interrupt core
+        self.virtio.send_command("ping -I tap0 1.1.1.1 > aa &", 20)
+        time.sleep(3)
+        self.check_interrupt_log(status="waked up")
+        # stop ping, check the status of interrupt core
+        self.dut.send_expect("killall -s INT ping", "#")
+        time.sleep(2)
+        self.check_interrupt_log(status="sleeps")
+        # restart ping, check the status of interrupt core
+        self.virtio.send_command("ping -I tap0 1.1.1.1 > aa &", 20)
+        time.sleep(3)
+        self.check_interrupt_log(status="waked up")
+        self.dut.send_expect("killall -s INT ping", "#")
+
+    def test_lsc_event_between_vhost_user_and_virtio_user_with_packed_ring(self):
+        """
+        LSC event between vhost-user and virtio-user
+        """
+        self.start_vhost_testpmd(pci="--no-pci")
+        self.start_virtio_user(packed=True)
+        self.check_virtio_side_link_status("up")
+
+        self.vhost.send_expect("quit", "#", 20)
+        self.check_virtio_side_link_status("down")
+
     def tear_down(self):
         """
         run after each test case.
-- 
1.8.3.1
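
Both the l3fwd-power and testpmd launches above move the virtio-user vdev into create_eal_parameters(vdevs=[...]) and switch to the packed ring by adding packed_vq=1; the l3fwd-power case also keeps cq=1 on the device, as in the suite's launch_l3fwd(). A compact sketch of the two vdev strings used here (hypothetical helper, argument values taken from the diff):

    def build_interrupt_vdev(path, for_l3fwd=False, packed=False):
        """virtio_user vdev for the l3fwd-power case (cq=1) or the testpmd case (fixed MAC)."""
        if for_l3fwd:
            vdev = "virtio_user0,path=%s,cq=1" % path
        else:
            vdev = "net_virtio_user0,mac=00:01:02:03:04:05,path=%s" % path
        if packed:
            vdev += ",packed_vq=1"
        return vdev

    print(build_interrupt_vdev("/dev/vhost-net", for_l3fwd=True, packed=True))
    # virtio_user0,path=/dev/vhost-net,cq=1,packed_vq=1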



* [dts] [PATCH V1 10/11]virtio_event_idx_interrupt: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (7 preceding siblings ...)
  2020-03-25  8:10 ` [dts] [PATCH V1 09/11]vhost_virtio_user_interrupt: " Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  2:59   ` Xiao, QimaiX
  2020-03-25  8:10 ` [dts] [PATCH V1 11/11]virtio_pvp_regression: " Xiao Qimai
                   ` (3 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_virtio_event_idx_interrupt.py | 34 ++++++++++++++++++++++++---
 1 file changed, 31 insertions(+), 3 deletions(-)

diff --git a/tests/TestSuite_virtio_event_idx_interrupt.py b/tests/TestSuite_virtio_event_idx_interrupt.py
index 08ad676..bdcca29 100644
--- a/tests/TestSuite_virtio_event_idx_interrupt.py
+++ b/tests/TestSuite_virtio_event_idx_interrupt.py
@@ -100,7 +100,7 @@ class TestVirtioIdxInterrupt(TestCase):
         self.vhost.send_expect(command_line, "testpmd> ", 30)
         self.vhost.send_expect("start", "testpmd> ", 30)
 
-    def start_vms(self):
+    def start_vms(self, packed=False):
         """
         start qemus
         """
@@ -113,6 +113,8 @@ class TestVirtioIdxInterrupt(TestCase):
         if self.queues > 1:
             vm_params['opt_queue'] = self.queues
             opt_args = opt_args + ",mq=on,vectors=%d" % (2*self.queues + 2)
+        if packed:
+            opt_args = opt_args + ',packed=on'
         vm_params['opt_settings'] = opt_args
         self.vm.set_vm_device(**vm_params)
         try:
@@ -224,7 +226,7 @@ class TestVirtioIdxInterrupt(TestCase):
         self.vm.stop()
         self.vhost.send_expect("quit", "#", 20)
 
-    def test_perf_virito_idx_interrupt_with_virtio_pci_driver_reload(self):
+    def test_perf_split_ring_virito_pci_driver_reload(self):
         """
         virtio-pci driver reload test
         """
@@ -237,7 +239,7 @@ class TestVirtioIdxInterrupt(TestCase):
         self.verify(res is True, "Should increase the wait times of ixia")
         self.stop_all_apps()
 
-    def test_perf_virtio_idx_interrupt_with_multi_queue(self):
+    def test_perf_wake_up_split_ring_virtio_net_cores_with_event_idx_interrupt_mode_16queue(self):
         """
         wake up virtio-net cores with event idx interrupt mode 16 queues test
         """
@@ -250,6 +252,32 @@ class TestVirtioIdxInterrupt(TestCase):
         self.check_each_queue_has_packets_info_on_vhost()
         self.stop_all_apps()
 
+    def test_perf_packed_ring_virito_pci_driver_reload(self):
+        """
+        virtio-pci driver reload test
+        """
+        self.queues = 1
+        self.nb_cores = 1
+        self.start_vhost_testpmd()
+        self.start_vms(packed=True)
+        self.config_virito_net_in_vm()
+        res = self.check_packets_after_reload_virtio_device(reload_times=30)
+        self.verify(res is True, "Should increase the wait times of ixia")
+        self.stop_all_apps()
+
+    def test_perf_wake_up_packed_ring_virtio_net_cores_with_event_idx_interrupt_mode_16queue(self):
+        """
+        wake up virtio-net cores with event idx interrupt mode 16 queues test
+        """
+        self.queues = 16
+        self.nb_cores = 16
+        self.start_vhost_testpmd()
+        self.start_vms(packed=True)
+        self.config_virito_net_in_vm()
+        self.start_to_send_packets(delay=15)
+        self.check_each_queue_has_packets_info_on_vhost()
+        self.stop_all_apps()
+
     def tear_down(self):
         """
         Run after each test case.
-- 
1.8.3.1
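
The start_vms() change above only touches how the virtio-net-pci 'opt_settings' string is assembled. A condensed standalone sketch of that logic (the initial opt_args value comes from earlier in the method and is not visible in this hunk, so it is left as a parameter):

    def build_opt_settings(opt_args, queues=1, packed=False):
        # Multi-queue adds mq=on plus roughly one MSI-X vector per
        # RX/TX virtqueue and two extra, i.e. the existing
        # "vectors=%d" % (2*queues + 2); packed ring support only
        # appends packed=on.
        if queues > 1:
            opt_args += ",mq=on,vectors=%d" % (2 * queues + 2)
        if packed:
            opt_args += ",packed=on"
        return opt_args

    # build_opt_settings("csum=on", queues=16, packed=True)
    #   -> 'csum=on,mq=on,vectors=34,packed=on'   (base string is an example)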



* [dts] [PATCH V1 11/11]virtio_pvp_regression: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (8 preceding siblings ...)
  2020-03-25  8:10 ` [dts] [PATCH V1 10/11]virtio_event_idx_interrupt: " Xiao Qimai
@ 2020-03-25  8:10 ` Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  2:59   ` Xiao, QimaiX
  2020-03-25  8:22 ` [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: " Wang, Yinan
                   ` (2 subsequent siblings)
  12 siblings, 2 replies; 34+ messages in thread
From: Xiao Qimai @ 2020-03-25  8:10 UTC (permalink / raw)
  To: dts; +Cc: Xiao Qimai

Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
---
 tests/TestSuite_virtio_pvp_regression.py | 34 ++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/tests/TestSuite_virtio_pvp_regression.py b/tests/TestSuite_virtio_pvp_regression.py
index 97e522c..42aceae 100644
--- a/tests/TestSuite_virtio_pvp_regression.py
+++ b/tests/TestSuite_virtio_pvp_regression.py
@@ -198,7 +198,7 @@ class TestVirtioPVPRegression(TestCase):
                 if 'cpupin' in list(self.vm.params[i]['cpu'][0].keys()):
                     self.vm.params[i]['cpu'][0].pop('cpupin')
 
-    def start_vm(self, qemu_path, qemu_version, modem, virtio_path):
+    def start_vm(self, qemu_path, qemu_version, modem, virtio_path, packed=False):
         """
         start vm
         """
@@ -227,6 +227,8 @@ class TestVirtioPVPRegression(TestCase):
             opt_args = 'disable-modern=false,' + opt_args
         elif(modem == 0):
             opt_args = 'disable-modern=true,' + opt_args
+        if packed:
+            opt_args = opt_args + ',packed=on'
         vm_params['opt_settings'] = opt_args
         self.vm.set_vm_device(**vm_params)
         self.vm.load_config()
@@ -344,7 +346,7 @@ class TestVirtioPVPRegression(TestCase):
         self.dut.send_expect('killall -s INT qemu-system-x86_64', '# ')
         self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
 
-    def pvp_regression_run(self, case_info, modem, virtio_path):
+    def pvp_regression_run(self, case_info, modem, virtio_path, packed=False):
         """
         run different qemu versions on different virtio paths of pvp regression
         modem = 0, start vm as virtio 0.95
@@ -357,7 +359,7 @@ class TestVirtioPVPRegression(TestCase):
             version = self.qemu_list[i]["version"]
             self.start_testpmd_as_vhost()
             # use different modem and different path to start vm
-            self.start_vm(path, version, modem, virtio_path)
+            self.start_vm(path, version, modem, virtio_path, packed=packed)
             self.start_testpmd_in_vm(virtio_path)
             self.logger.info("now testing the qemu path of %s" % path)
             time.sleep(5)
@@ -370,7 +372,7 @@ class TestVirtioPVPRegression(TestCase):
 
             self.logger.info('now reconnect from vm')
             self.dut.send_expect('killall -s INT qemu-system-x86_64', '# ')
-            self.start_vm(path, version, modem, virtio_path)
+            self.start_vm(path, version, modem, virtio_path, packed=packed)
             self.start_testpmd_in_vm(virtio_path)
             self.send_verify(case_info, version, "reconnect from vm")
 
@@ -397,7 +399,7 @@ class TestVirtioPVPRegression(TestCase):
         virtio_path = 'mergeable'
         self.pvp_regression_run(case_info, modem, virtio_path)
 
-    def test_perf_pvp_regression_normal_path(self):
+    def test_perf_pvp_regression_non_mergeable_path(self):
         """
         Test the performance of one vm with virtio 0.95 on normal path
         diff qemu + multi queue + reconnect
@@ -407,7 +409,7 @@ class TestVirtioPVPRegression(TestCase):
         virtio_path = 'normal'
         self.pvp_regression_run(case_info, modem, virtio_path)
 
-    def test_perf_pvp_regression_modern_normal_path(self):
+    def test_perf_pvp_regression_modern_non_mergeable_path(self):
         """
         Test the performance of one vm with virtio 1.0 on normal path
         diff qemu + multi queue + reconnect
@@ -437,6 +439,26 @@ class TestVirtioPVPRegression(TestCase):
         virtio_path = 'vector_rx'
         self.pvp_regression_run(case_info, modem, virtio_path)
 
+    def test_perf_pvp_with_virtio11_mergeable_path(self):
+        """
+        Test the performance of one vm with virtio 1.1 on mergeable path
+        diff qemu + multi queue + reconnect
+        """
+        case_info = 'virtio-1.1 mergeable'
+        modem = 1
+        virtio_path = 'mergeable'
+        self.pvp_regression_run(case_info, modem, virtio_path, packed=True)
+
+    def test_perf_pvp_with_virtio11_non_mergeable_path(self):
+        """
+        Test the performance of one vm with virtio 1.1 on non-mergeable path
+        diff qemu + multi queue + reconnect
+        """
+        case_info = 'virtio-1.1 normal'
+        modem = 1
+        virtio_path = 'normal'
+        self.pvp_regression_run(case_info, modem, virtio_path, packed=True)
+
     def tear_down(self):
         """
         Run after each test case.
-- 
1.8.3.1
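
The modem/packed handling added to start_vm() reduces to the small decision below; the rest of opt_args (mrg_rxbuf, mq, vectors and so on) is built earlier in the method and is not part of this hunk. A minimal sketch:

    def compose_opt_settings(opt_args, modem, packed=False):
        # modem == 1 starts the guest device as modern virtio 1.0,
        # modem == 0 pins it to legacy virtio 0.95, and packed=True
        # additionally requests the packed ring (virtio 1.1) layout.
        if modem == 1:
            opt_args = 'disable-modern=false,' + opt_args
        elif modem == 0:
            opt_args = 'disable-modern=true,' + opt_args
        if packed:
            opt_args = opt_args + ',packed=on'
        return opt_args

    # compose_opt_settings('mrg_rxbuf=on', modem=1, packed=True)
    #   -> 'disable-modern=false,mrg_rxbuf=on,packed=on'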



* Re: [dts] [PATCH V1 04/11]pvp_virtio_user_4k_pages: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 04/11]pvp_virtio_user_4k_pages: " Xiao Qimai
@ 2020-03-25  8:20   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:20 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: 2020年3月25日 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 04/11]pvp_virtio_user_4k_pages: update script
> according to testplan's update
>
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_pvp_virtio_user_4k_pages.py | 28
> ++++++++++++++++++++--------
>  1 file changed, 20 insertions(+), 8 deletions(-)
>
> diff --git a/tests/TestSuite_pvp_virtio_user_4k_pages.py
> b/tests/TestSuite_pvp_virtio_user_4k_pages.py
> index 5e6f657..532b6c1 100644
> --- a/tests/TestSuite_pvp_virtio_user_4k_pages.py
> +++ b/tests/TestSuite_pvp_virtio_user_4k_pages.py
> @@ -130,21 +130,21 @@ class TestPvpVirtioUser4kPages(TestCase):
>          Start testpmd on vhost
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = " -m 1024 --no-huge --vdev
> 'net_vhost0,iface=vhost-net,queues=1'"
> +        vdev = 'net_vhost0,iface=vhost-net,queues=1'
>          para = " -- -i --no-numa --socket-num=%d" % self.ports_socket
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_vhost_user, prefix='vhost',
> ports=[self.pci_info])
> -        command_line_client = testcmd + eal_params + vdev + para
> +        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_vhost_user, prefix='vhost',
> ports=[self.pci_info], vdevs=[vdev])
> +        command_line_client = testcmd + eal_params + ' -m 1024
> + --no-huge' + para
>          self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
>          self.vhost_user.send_expect("start", "testpmd> ", 120)
>
> -    def start_testpmd_as_virtio(self):
> +    def start_testpmd_as_virtio(self, packed=False):
>          """
>          Start testpmd on virtio
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = " --no-huge -m 1024
> --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1 --
> -i"
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio_user,
> prefix='virtio-user', ports=[self.pci_info])
> -        command_line_user = testcmd + eal_params + vdev
> +        vdev =
> "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1" if not
> packed else
> "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,packed_vq=1,queu
> es=1"
> +        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio_user,
> prefix='virtio-user', ports=[self.pci_info], vdevs=[vdev])
> +        command_line_user = testcmd + eal_params + ' --no-huge -m 1024 --
> -i'
>          self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
>          self.virtio_user.send_expect("set fwd mac", "testpmd> ", 120)
>          self.virtio_user.send_expect("start", "testpmd> ", 120) @@ -168,7
> +168,7 @@ class TestPvpVirtioUser4kPages(TestCase):
>          self.dut.close_session(self.vhost_user)
>          self.dut.close_session(self.virtio_user)
>
> -    def test_perf_pvp_virtio_user_with_4K_pages(self):
> +    def test_perf_pvp_virtio_user_split_ring_with_4K_pages(self):
>          """
>          Basic test for virtio-user 4K pages
>          """
> @@ -179,6 +179,18 @@ class TestPvpVirtioUser4kPages(TestCase):
>          self.result_table_print()
>          self.close_all_apps()
>
> +    def test_perf_pvp_virtio_user_packed_ring_with_4K_pages(self):
> +        """
> +        Basic test for virtio-user 4K pages
> +        """
> +        self.start_testpmd_as_vhost()
> +        self.prepare_tmpfs_for_4k()
> +        self.start_testpmd_as_virtio(packed=True)
> +        self.send_and_verify()
> +        self.result_table_print()
> +        self.close_all_apps()
> +
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
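
In the 4K-page cases above, the split ring and packed ring runs differ only in the vdev string handed to create_eal_parameters(vdevs=[...]); the '--no-huge -m 1024' part is identical. A small sketch of that selection:

    def virtio_user_vdev_4k(packed=False):
        # vdev passed to create_eal_parameters(vdevs=[...]); the packed
        # ring case only inserts packed_vq=1 ahead of queues=1.
        base = "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net"
        return base + (",packed_vq=1,queues=1" if packed else ",queues=1")

    # virtio_user_vdev_4k(packed=True)
    #   -> '...,path=./vhost-net,packed_vq=1,queues=1'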



* Re: [dts] [PATCH V1 06/11]vhost_event_idx_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 06/11]vhost_event_idx_interrupt: " Xiao Qimai
@ 2020-03-25  8:21   ` Wang, Yinan
  2020-03-31  2:59   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:21 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: 2020年3月25日 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 06/11]vhost_event_idx_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_event_idx_interrupt.py | 52
> +++++++++++++++++++++++++---
>  1 file changed, 47 insertions(+), 5 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_event_idx_interrupt.py
> b/tests/TestSuite_vhost_event_idx_interrupt.py
> index fb7da60..5a9f932 100644
> --- a/tests/TestSuite_vhost_event_idx_interrupt.py
> +++ b/tests/TestSuite_vhost_event_idx_interrupt.py
> @@ -161,7 +161,7 @@ class TestVhostEventIdxInterrupt(TestCase):
>                      'This qemu version should greater than 2.7 ' + \
>                      'in this suite, please config it in vhost_sample.cfg file')
> 
> -    def start_vms(self, vm_num=1):
> +    def start_vms(self, vm_num=1, packed=False):
>          """
>          start qemus
>          """
> @@ -178,6 +178,8 @@ class TestVhostEventIdxInterrupt(TestCase):
>                  opt_args = "csum=on,mq=on,vectors=%d" %
> (2*self.queues + 2)
>              else:
>                  opt_args = "csum=on"
> +            if packed:
> +                opt_args = opt_args + ',packed=on'
>              vm_params['opt_settings'] = opt_args
>              vm_info.set_vm_device(**vm_params)
>              self.set_vm_cpu_number(vm_info) @@ -250,9 +252,9 @@
> class TestVhostEventIdxInterrupt(TestCase):
>          """
>          for i in range(len(self.vm)):
>              self.vm[i].stop()
> -        self.vhost.send_expect("^c", "#", 10)
> +        self.dut.send_expect("killall l3fwd-power", "#", timeout=2)
> 
> -    def test_vhost_idx_interrupt(self):
> +    def
> test_wake_up_split_ring_vhost_user_core_with_event_idx_interrupt(self):
>          """
>          wake up vhost-user core with l3fwd-power sample
>          """
> @@ -265,7 +267,7 @@ class TestVhostEventIdxInterrupt(TestCase):
>          self.send_and_verify()
>          self.stop_all_apps()
> 
> -    def test_vhost_idx_interrupt_with_multi_queue(self):
> +    def
> test_wake_up_split_ring_vhost_user_cores_with_event_idx_interrupt_mode_1
> 6_queues(self):
>          """
>          wake up vhost-user core with l3fwd-power sample when multi
> queues are enabled
>          """
> @@ -279,7 +281,7 @@ class TestVhostEventIdxInterrupt(TestCase):
>          self.send_and_verify()
>          self.stop_all_apps()
> 
> -    def test_vhost_idx_interrupt_with_multi_vms(self):
> +    def
> test_wake_up_split_ring_vhost_user_cores_by_multi_virtio_net_in_vms_with_
> event_idx_interrupt(self):
>          """
>          wake up vhost-user cores with l3fwd-power sample and multi VMs
>          """
> @@ -292,6 +294,46 @@ class TestVhostEventIdxInterrupt(TestCase):
>          self.send_and_verify()
>          self.stop_all_apps()
> 
> +    def
> test_wake_up_packed_ring_vhost_user_core_with_event_idx_interrupt(self):
> +        """
> +        wake up vhost-user core with l3fwd-power sample
> +        """
> +        self.vm_num = 1
> +        self.queues = 1
> +        self.get_core_mask()
> +        self.lanuch_l3fwd_power()
> +        self.start_vms(vm_num=self.vm_num, packed=True)
> +        self.relanuch_l3fwd_power()
> +        self.send_and_verify()
> +        self.stop_all_apps()
> +
> +    def
> test_wake_up_packed_ring_vhost_user_cores_with_event_idx_interrupt_mod
> e_16_queues(self):
> +        """
> +        wake up vhost-user core with l3fwd-power sample when multi
> queues are enabled
> +        """
> +        self.vm_num = 1
> +        self.queues = 16
> +        self.get_core_mask()
> +        self.lanuch_l3fwd_power()
> +        self.start_vms(vm_num=self.vm_num, packed=True)
> +        self.relanuch_l3fwd_power()
> +        self.config_virito_net_in_vm()
> +        self.send_and_verify()
> +        self.stop_all_apps()
> +
> +    def
> test_wake_up_packed_ring_vhost_user_cores_by_multi_virtio_net_in_vms_wi
> th_event_idx_interrupt(self):
> +        """
> +        wake up vhost-user cores with l3fwd-power sample and multi VMs
> +        """
> +        self.vm_num = 2
> +        self.queues = 1
> +        self.get_core_mask()
> +        self.lanuch_l3fwd_power()
> +        self.start_vms(vm_num=self.vm_num, packed=True)
> +        self.relanuch_l3fwd_power()
> +        self.send_and_verify()
> +        self.stop_all_apps()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
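
One side change in this patch is the tear-down: instead of typing ^c into the vhost session, the suite now issues "killall l3fwd-power" on the DUT. A local equivalent of that single step, for illustration only:

    import subprocess

    def stop_l3fwd_power():
        # Same effect as the "killall l3fwd-power" the suite sends on
        # the DUT; check=False because the process may already be gone
        # by the time tear-down runs.
        subprocess.run(["killall", "l3fwd-power"], check=False)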



* Re: [dts] [PATCH V1 08/11]vhost_virtio_pmd_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 08/11]vhost_virtio_pmd_interrupt: " Xiao Qimai
@ 2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:22 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: 2020年3月25日 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 08/11]vhost_virtio_pmd_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_virtio_pmd_interrupt.py | 19 ++++++++++++++++---
>  1 file changed, 16 insertions(+), 3 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_virtio_pmd_interrupt.py
> b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
> index 5e6437e..ca69932 100644
> --- a/tests/TestSuite_vhost_virtio_pmd_interrupt.py
> +++ b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
> @@ -117,7 +117,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
>          # get the core list depend on current nb_cores number
>          self.get_core_list()
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = [r"'net_vhost0,iface=%s/vhost-net,queues=%d'" %
> (self.base_dir, self.queues)]
> +        vdev = ['net_vhost0,iface=%s/vhost-net,queues=%d' %
> + (self.base_dir, self.queues)]
>          eal_params = self.dut.create_eal_parameters(cores=self.core_list,
> ports=[self.pci_info], vdevs=vdev)
>          para = " -- -i --nb-cores=%d --rxq=%d --txq=%d --rss-ip" %
> (self.nb_cores, self.queues, self.queues)
>          command_line_client = testcmd + eal_params + para @@ -166,7
> +166,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
>              if list(self.vm.params[i].keys())[0] == 'cpu':
>                  self.vm.params[i]['cpu'][0]['number'] = self.queues
> 
> -    def start_vms(self, mode=0):
> +    def start_vms(self, mode=0, packed=False):
>          """
>          start qemus
>          """
> @@ -177,7 +177,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
>          vm_params['opt_path'] = '%s/vhost-net' % self.base_dir
>          vm_params['opt_mac'] = "00:11:22:33:44:55"
>          vm_params['opt_queue'] = self.queues
> -        opt_param = "mrg_rxbuf=on,csum=on,mq=on,vectors=%d" %
> (2*self.queues+2)
> +        opt_param = "mrg_rxbuf=on,csum=on,mq=on,vectors=%d" %
> + (2*self.queues+2) if not packed else
> + "mrg_rxbuf=on,csum=on,mq=on,vectors=%d,packed=on" % (2*self.queues+2)
>          if mode == 0:
>              vm_params['opt_settings'] = "disable-modern=true," +
> opt_param
>          elif mode == 1:
> @@ -311,6 +311,19 @@ class TestVhostVirtioPmdInterrupt(TestCase):
>          self.launch_l3fwd_power_in_vm()
>          self.send_and_verify()
> 
> +    def test_perf_packed_ring_virtio_interrupt_with_16queues(self):
> +        """
> +        wake up packed ring virtio core with l3fwd-power sample when 16 queues enabled
> +        """
> +        self.queues = 16
> +        self.nb_cores = 16
> +        self.start_testpmd_on_vhost()
> +        self.start_vms(mode=0, packed=True)
> +        self.prepare_vm_env()
> +        self.launch_l3fwd_power_in_vm()
> +        self.send_and_verify()
> +
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
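
In the new 16-queue packed ring case, the vhost-side testpmd keeps one forwarding core per queue, so nb-cores, rxq and txq all follow self.queues. The argument string from the hunk above, factored into a standalone helper:

    def vhost_testpmd_args(nb_cores, queues):
        # Matches the 'para' string in start_testpmd_on_vhost():
        # interactive mode, one core per queue, RSS on IP addresses.
        return " -- -i --nb-cores=%d --rxq=%d --txq=%d --rss-ip" % (
            nb_cores, queues, queues)

    # vhost_testpmd_args(16, 16)
    #   -> ' -- -i --nb-cores=16 --rxq=16 --txq=16 --rss-ip'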



* Re: [dts] [PATCH V1 11/11]virtio_pvp_regression: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 11/11]virtio_pvp_regression: " Xiao Qimai
@ 2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  2:59   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:22 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: 2020年3月25日 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 11/11]virtio_pvp_regression: update script according to
> testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_virtio_pvp_regression.py | 34
> ++++++++++++++++++++++++++------
>  1 file changed, 28 insertions(+), 6 deletions(-)
> 
> diff --git a/tests/TestSuite_virtio_pvp_regression.py
> b/tests/TestSuite_virtio_pvp_regression.py
> index 97e522c..42aceae 100644
> --- a/tests/TestSuite_virtio_pvp_regression.py
> +++ b/tests/TestSuite_virtio_pvp_regression.py
> @@ -198,7 +198,7 @@ class TestVirtioPVPRegression(TestCase):
>                  if 'cpupin' in list(self.vm.params[i]['cpu'][0].keys()):
>                      self.vm.params[i]['cpu'][0].pop('cpupin')
> 
> -    def start_vm(self, qemu_path, qemu_version, modem, virtio_path):
> +    def start_vm(self, qemu_path, qemu_version, modem, virtio_path,
> packed=False):
>          """
>          start vm
>          """
> @@ -227,6 +227,8 @@ class TestVirtioPVPRegression(TestCase):
>              opt_args = 'disable-modern=false,' + opt_args
>          elif(modem == 0):
>              opt_args = 'disable-modern=true,' + opt_args
> +        if packed:
> +            opt_args = opt_args + ',packed=on'
>          vm_params['opt_settings'] = opt_args
>          self.vm.set_vm_device(**vm_params)
>          self.vm.load_config()
> @@ -344,7 +346,7 @@ class TestVirtioPVPRegression(TestCase):
>          self.dut.send_expect('killall -s INT qemu-system-x86_64', '# ')
>          self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
> 
> -    def pvp_regression_run(self, case_info, modem, virtio_path):
> +    def pvp_regression_run(self, case_info, modem, virtio_path,
> packed=False):
>          """
>          run different qemu verssion on different virtio path of pvp regression
>          modem = 0, start vm as virtio 0.95 @@ -357,7 +359,7 @@ class
> TestVirtioPVPRegression(TestCase):
>              version = self.qemu_list[i]["version"]
>              self.start_testpmd_as_vhost()
>              # use different modem and different path to start vm
> -            self.start_vm(path, version, modem, virtio_path)
> +            self.start_vm(path, version, modem, virtio_path,
> + packed=packed)
>              self.start_testpmd_in_vm(virtio_path)
>              self.logger.info("now testing the qemu path of %s" % path)
>              time.sleep(5)
> @@ -370,7 +372,7 @@ class TestVirtioPVPRegression(TestCase):
> 
>              self.logger.info('now reconnect from vm')
>              self.dut.send_expect('killall -s INT qemu-system-x86_64', '# ')
> -            self.start_vm(path, version, modem, virtio_path)
> +            self.start_vm(path, version, modem, virtio_path,
> + packed=packed)
>              self.start_testpmd_in_vm(virtio_path)
>              self.send_verify(case_info, version, "reconnect from vm")
> 
> @@ -397,7 +399,7 @@ class TestVirtioPVPRegression(TestCase):
>          virtio_path = 'mergeable'
>          self.pvp_regression_run(case_info, modem, virtio_path)
> 
> -    def test_perf_pvp_regression_normal_path(self):
> +    def test_perf_pvp_regression_non_mergeable_path(self):
>          """
>          Test the performance of one vm with virtio 0.95 on normal path
>          diff qemu + multi queue + reconnect @@ -407,7 +409,7 @@ class
> TestVirtioPVPRegression(TestCase):
>          virtio_path = 'normal'
>          self.pvp_regression_run(case_info, modem, virtio_path)
> 
> -    def test_perf_pvp_regression_modern_normal_path(self):
> +    def test_perf_pvp_regression_modern_non_mergeable_path(self):
>          """
>          Test the performance of one vm with virtio 1.0 on normal path
>          diff qemu + multi queue + reconnect @@ -437,6 +439,26 @@ class
> TestVirtioPVPRegression(TestCase):
>          virtio_path = 'vector_rx'
>          self.pvp_regression_run(case_info, modem, virtio_path)
> 
> +    def test_perf_pvp_with_virtio11_mergeable_path(self):
> +        """
> +        Test the performance of one vm with virtio 1.1 on mergeable path
> +        diff qemu + multi queue + reconnect
> +        """
> +        case_info = 'virtio-1.1 mergeable'
> +        modem = 1
> +        virtio_path = 'mergeable'
> +        self.pvp_regression_run(case_info, modem, virtio_path,
> + packed=True)
> +
> +    def test_perf_pvp_with_virtio11_non_mergeable_path(self):
> +        """
> +        Test the performance of one vm with virtio 1.1 on non-mergeable path
> +        diff qemu + multi queue + reconnect
> +        """
> +        case_info = 'virtio-1.1 normal'
> +        modem = 1
> +        virtio_path = 'normal'
> +        self.pvp_regression_run(case_info, modem, virtio_path,
> + packed=True)
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
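
The reconnect half of pvp_regression_run() follows the same pattern for every qemu version under test. A sketch with the suite's methods replaced by placeholder callables, since only the call sites are visible in this hunk:

    def reconnect_from_vm(kill_qemu, start_vm, start_testpmd_in_vm, send_verify):
        # Kill the guest with SIGINT, restart it with the same qemu
        # path/version, modem and packed settings, then confirm PVP
        # traffic still passes after the reconnect.
        kill_qemu()              # 'killall -s INT qemu-system-x86_64'
        start_vm()               # same arguments as the first boot
        start_testpmd_in_vm()
        send_verify("reconnect from vm")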



* Re: [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (9 preceding siblings ...)
  2020-03-25  8:10 ` [dts] [PATCH V1 11/11]virtio_pvp_regression: " Xiao Qimai
@ 2020-03-25  8:22 ` Wang, Yinan
  2020-03-31  3:00 ` Xiao, QimaiX
  2020-03-31  3:06 ` Tu, Lijuan
  12 siblings, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:22 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: 2020年3月25日 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update
> script according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  .../TestSuite_loopback_multi_paths_port_restart.py | 85
> +++++++++++++++-------
>  1 file changed, 57 insertions(+), 28 deletions(-)
> 
> diff --git a/tests/TestSuite_loopback_multi_paths_port_restart.py
> b/tests/TestSuite_loopback_multi_paths_port_restart.py
> index 0b54b8d..5f1faad 100644
> --- a/tests/TestSuite_loopback_multi_paths_port_restart.py
> +++ b/tests/TestSuite_loopback_multi_paths_port_restart.py
> @@ -121,15 +121,21 @@ class TestLoopbackPortRestart(TestCase):
>                  break
>              time.sleep(3)
>              loop = loop + 1
> -
>          self.verify("down" not in port_status, "port can not up after restart")
> 
> -    def port_restart(self):
> -        self.vhost.send_expect("stop", "testpmd> ", 120)
> -        self.vhost.send_expect("port stop 0", "testpmd> ", 120)
> -        self.check_port_throughput_after_port_stop()
> -        self.vhost.send_expect("clear port stats all", "testpmd> ", 120)
> -        self.vhost.send_expect("port start all", "testpmd> ", 120)
> +    def port_restart(self, restart_times=1):
> +        if restart_times == 1:
> +            self.vhost.send_expect("stop", "testpmd> ", 120)
> +            self.vhost.send_expect("port stop 0", "testpmd> ", 120)
> +            self.check_port_throughput_after_port_stop()
> +            self.vhost.send_expect("clear port stats all", "testpmd> ", 120)
> +            self.vhost.send_expect("port start all", "testpmd> ", 120)
> +        else:
> +            for i in range(restart_times):
> +                self.vhost.send_expect("stop", "testpmd> ", 120)
> +                self.vhost.send_expect("port stop 0", "testpmd> ", 120)
> +                self.vhost.send_expect("clear port stats all", "testpmd> ",
> 120)
> +                self.vhost.send_expect("port start all", "testpmd> ",
> + 120)
>          self.check_port_link_status_after_port_restart()
>          self.vhost.send_expect("set burst 1", "testpmd> ", 120)
>          self.vhost.send_expect("start tx_first 1", "testpmd> ", 120) @@
> -156,7 +162,7 @@ class TestLoopbackPortRestart(TestCase):
>          self.verify(Mpps > 0, "%s can not receive packets" %
> self.running_case)
>          return Mpps
> 
> -    def send_and_verify(self, case_info, frame_size):
> +    def send_and_verify(self, case_info, frame_size, restart_times=1):
>          """
>          start to send packets and calculate the average throughput
>          """
> @@ -166,7 +172,7 @@ class TestLoopbackPortRestart(TestCase):
>          Mpps = self.calculate_avg_throughput()
>          self.update_table_info(case_info, frame_size, Mpps, "Before
> Restart")
> 
> -        self.port_restart()
> +        self.port_restart(restart_times)
>          Mpps = self.calculate_avg_throughput()
>          self.update_table_info(case_info, frame_size, Mpps, "After Restart
> and set burst to 1")
> 
> @@ -184,7 +190,7 @@ class TestLoopbackPortRestart(TestCase):
>          self.dut.close_session(self.vhost)
>          self.dut.close_session(self.virtio_user)
> 
> -    def test_vhost_loopback_virtio11_mergeable_mac(self):
> +    def test_loopback_test_with_packed_ring_mergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port on virtio1.1
> mergeable path
>          """
> @@ -193,11 +199,11 @@ class TestLoopbackPortRestart(TestCase):
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtio1.1 mergeable", frame_size)
> +            self.send_and_verify("packed ring mergeable", frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtio11_normal_mac(self):
> +    def test_loopback_test_with_packed_ring_nonmergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port on virtio1.1 normal path
>          """
> @@ -206,63 +212,86 @@ class TestLoopbackPortRestart(TestCase):
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtio1.1 normal", frame_size)
> +            self.send_and_verify("packed ring non-mergeable", frame_size)
> +            self.close_all_testpmd()
> +        self.result_table_print()
> +
> +    def
> test_lookback_test_with_packed_ring_inorder_mergeable_path(self):
> +        pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=1,in_order=1",
> +                   "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +        for frame_size in self.frame_sizes:
> +            self.start_vhost_testpmd()
> +            self.start_virtio_user_testpmd(pmd_arg)
> +            self.send_and_verify("packed ring inorder mergeable", frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_inorder_mergeable_mac(self):
> +    def
> test_lookback_test_with_packed_ring_inorder_nonmergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port on inorder mergeable
> path
>          """
> -        pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1 ",
> -                          "path": "--tx-offloads=0x0
> --enable-hw-vlan-strip "}
> +        pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=0,in_order=1",
> +                          "path": "--tx-offloads=0x0
> + --enable-hw-vlan-strip"}
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("inorder mergeable", frame_size)
> +            self.send_and_verify("packed ring inorder non-mergeable",
> + frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_inorder_mergeable_off_mac(self):
> +    def test_lookback_test_with_split_ring_inorder_mergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port on inorder normal
> path
>          """
> +        pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
> +                          "path": "--tx-offloads=0x0
> --enable-hw-vlan-strip "}
> +        for frame_size in self.frame_sizes:
> +            self.start_vhost_testpmd()
> +            self.start_virtio_user_testpmd(pmd_arg)
> +            self.send_and_verify("split ring inorder mergeable", frame_size)
> +            self.close_all_testpmd()
> +        self.result_table_print()
> +
> +    def
> test_lookback_test_with_split_ring_inorder_nonmergeable_path(self):
> +        """
> +        performance for [frame_sizes] and restart port on split ring inorder non-mergeable path
> +        """
>          pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0 ",
>                            "path": "--tx-offloads=0x0
> --enable-hw-vlan-strip "}
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("inorder normal", frame_size)
> +            self.send_and_verify("split ring inorder non-mergeable",
> + frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_mergeable_mac(self):
> +    def test_lookback_test_with_split_ring_mergeable_path(self):
>          """
> -        performance for [frame_sizes] and restart port on virtio mergeable
> path
> +        performance for [frame_sizes] and restart port on split ring mergeable path
>          """
> -        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1 ",
> +        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
>                            "path": "--tx-offloads=0x0
> --enable-hw-vlan-strip "}
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtiouser mergeable", frame_size)
> +            self.send_and_verify("split ring mergeable", frame_size,
> + restart_times=100)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_normal_mac(self):
> +    def test_lookback_test_with_split_ring_nonmergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port on virtio normal path
>          """
> -        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0 ",
> +        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
>                            "path": "--tx-offloads=0x0
> --enable-hw-vlan-strip "}
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtiouser normal", frame_size)
> +            self.send_and_verify("split ring non-mergeable",
> + frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_vector_rx_mac(self):
> +    def test_loopback_test_with_split_ring_vector_rx_path(self):
>          """
>          performance for frame_sizes and restart port on virtio vector rx
>          """
> @@ -271,7 +300,7 @@ class TestLoopbackPortRestart(TestCase):
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtiouser vector_rx", frame_size)
> +            self.send_and_verify("split ring vector_rx", frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> --
> 1.8.3.1
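
The reworked port_restart() boils down to one stop/start sequence repeated restart_times times, with the extra throughput check kept only for the single-restart case. A condensed sketch, with send_expect standing in for the vhost session object:

    def restart_port(send_expect, restart_times=1, check_throughput=None):
        # Same command sequence as the hunk above; the split ring
        # mergeable case drives this with restart_times=100.
        for _ in range(restart_times):
            send_expect("stop", "testpmd> ", 120)
            send_expect("port stop 0", "testpmd> ", 120)
            if restart_times == 1 and check_throughput:
                check_throughput()
            send_expect("clear port stats all", "testpmd> ", 120)
            send_expect("port start all", "testpmd> ", 120)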



* Re: [dts] [PATCH V1 09/11]vhost_virtio_user_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 09/11]vhost_virtio_user_interrupt: " Xiao Qimai
@ 2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:22 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: 2020年3月25日 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 09/11]vhost_virtio_user_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_virtio_user_interrupt.py | 72
> +++++++++++++++++++++-----
>  1 file changed, 59 insertions(+), 13 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_virtio_user_interrupt.py
> b/tests/TestSuite_vhost_virtio_user_interrupt.py
> index 610e56e..69d5668 100644
> --- a/tests/TestSuite_vhost_virtio_user_interrupt.py
> +++ b/tests/TestSuite_vhost_virtio_user_interrupt.py
> @@ -89,13 +89,13 @@ class TestVirtioUserInterrupt(TestCase):
>          out = self.dut.build_dpdk_apps("./examples/l3fwd-power")
>          self.verify("Error" not in out, "compilation l3fwd-power error")
> 
> -    def launch_l3fwd(self, path):
> +    def launch_l3fwd(self, path, packed=False):
>          self.core_interrupt = self.core_list_l3fwd[0]
>          example_para = "./examples/l3fwd-power/build/l3fwd-power "
> -        vdev = " --log-level='user1,7' --vdev=virtio_user0,path=%s,cq=1 -- -p
> 1" % path
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_l3fwd, prefix='l3fwd-pwd',
> no_pci=True, ports=[self.pci_info])
> +        vdev = "virtio_user0,path=%s,cq=1" % path if not packed else
> "virtio_user0,path=%s,cq=1,packed_vq=1" % path
> +        eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_l3fwd,
> + prefix='l3fwd-pwd', no_pci=True, ports=[self.pci_info], vdevs=[vdev])
>          para = " --config='(0,0,%s)' --parse-ptype" % self.core_interrupt
> -        cmd_l3fwd = example_para + eal_params + vdev + para
> +        cmd_l3fwd = example_para + eal_params + " --log-level='user1,7'
> + -- -p 1 " + para
>          self.l3fwd.get_session_before(timeout=2)
>          self.l3fwd.send_expect(cmd_l3fwd, "POWER", 40)
>          time.sleep(10)
> @@ -110,27 +110,27 @@ class TestVirtioUserInterrupt(TestCase):
>          start testpmd on vhost side
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = [r"'net_vhost0,iface=vhost-net,queues=1,client=0'"]
> +        vdev = ["net_vhost0,iface=vhost-net,queues=1,client=0"]
>          para = " -- -i --rxq=1 --txq=1"
>          if len(pci) == 0:
>              eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_vhost, ports=[self.pci_info],
> vdevs=vdev)
>          else:
> -            eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_vhost, prefix='vhost',
> no_pci=True, ports=[self.pci_info], vdevs=vdev)
> +            eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_vhost,
> + prefix='vhost', no_pci=True, vdevs=vdev)
>          cmd_vhost_user = testcmd + eal_params + para
> 
>          self.vhost.send_expect(cmd_vhost_user, "testpmd>", 30)
>          self.vhost.send_expect("set fwd mac", "testpmd>", 30)
>          self.vhost.send_expect("start", "testpmd>", 30)
> 
> -    def start_virtio_user(self):
> +    def start_virtio_user(self, packed=False):
>          """
>          start testpmd on virtio side
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = "
> --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net"
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_l3fwd, prefix='virtio',
> no_pci=True, ports=[self.pci_info])
> +        vdev = "net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net"
> if not packed else
> "net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,packed_vq=1"
> +        eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_l3fwd,
> + prefix='virtio', no_pci=True, vdevs=[vdev])
>          para = " -- -i --txd=512 --rxd=128 --tx-offloads=0x00"
> -        cmd_virtio_user = testcmd + eal_params + vdev + para
> +        cmd_virtio_user = testcmd + eal_params + para
>          self.virtio.send_expect(cmd_virtio_user, "testpmd>", 120)
>          self.virtio.send_expect("set fwd mac", "testpmd>", 20)
>          self.virtio.send_expect("start", "testpmd>", 20) @@ -154,7 +154,7
> @@ class TestVirtioUserInterrupt(TestCase):
>          else:
>              self.logger.error("Wrong link status not right, status is %s" %
> result)
> 
> -    def test_interrupt_with_vhost_net_as_backed(self):
> +    def
> test_split_ring_virtio_user_interrupt_with_vhost_net_as_backed(self):
>          """
>          Check the virtio-user interrupt can work when use vhost-net as
> backend
>          """
> @@ -175,7 +175,7 @@ class TestVirtioUserInterrupt(TestCase):
>          self.check_interrupt_log(status="waked up")
>          self.dut.send_expect("killall -s INT ping", "#")
> 
> -    def test_interrupt_with_vhost_user_as_backed(self):
> +    def
> test_split_ring_virtio_user_interrupt_with_vhost_user_as_backed(self):
>          """
>          Check the virtio-user interrupt can work when use vhost-user as
> backend
>          """
> @@ -189,7 +189,7 @@ class TestVirtioUserInterrupt(TestCase):
>              time.sleep(3)
>              self.check_interrupt_log(status="waked up")
> 
> -    def test_lsc_event_between_vhost_and_virtio_user(self):
> +    def
> test_lsc_event_between_vhost_user_and_virtio_user_with_split_ring(self):
>          """
>          LSC event between vhost-user and virtio-user
>          """
> @@ -200,6 +200,52 @@ class TestVirtioUserInterrupt(TestCase):
>          self.vhost.send_expect("quit", "#", 20)
>          self.check_virtio_side_link_status("down")
> 
> +    def
> test_packed_ring_virtio_user_interrupt_with_vhost_user_as_backed(self):
> +        """
> +        Check the virtio-user interrupt can work when use vhost-user as
> backend
> +        """
> +        self.start_vhost_testpmd(pci="")
> +        self.launch_l3fwd(path="./vhost-net", packed=True)
> +        # double check the status of interrupt core
> +        for i in range(2):
> +
> self.tester.scapy_append('pk=[Ether(dst="52:54:00:00:00:01")/IP()/("X"*64)]')
> +            self.tester.scapy_append('sendp(pk, iface="%s", count=100)' %
> self.tx_interface)
> +            self.tester.scapy_execute()
> +            time.sleep(3)
> +            self.check_interrupt_log(status="waked up")
> +
> +    def
> test_packed_ring_virtio_user_interrupt_with_vhost_net_as_backed(self):
> +        """
> +        Check the virtio-user interrupt can work when use vhost-net as
> backend
> +        """
> +        self.launch_l3fwd(path="/dev/vhost-net", packed=True)
> +        self.virtio.send_expect("ifconfig tap0 up", "#", 20)
> +        self.virtio.send_expect("ifconfig tap0 1.1.1.2", "#", 20)
> +        # start to ping, check the status of interrupt core
> +        self.virtio.send_command("ping -I tap0 1.1.1.1 > aa &", 20)
> +        time.sleep(3)
> +        self.check_interrupt_log(status="waked up")
> +        # stop ping, check the status of interrupt core
> +        self.dut.send_expect("killall -s INT ping", "#")
> +        time.sleep(2)
> +        self.check_interrupt_log(status="sleeps")
> +        # restart ping, check the status of interrupt core
> +        self.virtio.send_command("ping -I tap0 1.1.1.1 > aa &", 20)
> +        time.sleep(3)
> +        self.check_interrupt_log(status="waked up")
> +        self.dut.send_expect("killall -s INT ping", "#")
> +
> +    def
> test_lsc_event_between_vhost_user_and_virtio_user_with_packed_ring(self):
> +        """
> +        LSC event between vhost-user and virtio-user
> +        """
> +        self.start_vhost_testpmd(pci="--no-pci")
> +        self.start_virtio_user(packed=True)
> +        self.check_virtio_side_link_status("up")
> +
> +        self.vhost.send_expect("quit", "#", 20)
> +        self.check_virtio_side_link_status("down")
> +
>      def tear_down(self):
>          """
>          run after each test case.
> --
> 1.8.3.1
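
Across the interrupt cases in this patch, the packed ring runs differ from the split ring ones only by an extra vdev argument. A sketch of the two vdev builders used above (the testpmd one carries a MAC, the l3fwd-power one requests a control queue):

    def testpmd_virtio_user_vdev(packed=False):
        # vdev used by start_virtio_user().
        vdev = "net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net"
        return vdev + (",packed_vq=1" if packed else "")

    def l3fwd_virtio_user_vdev(path, packed=False):
        # vdev used by launch_l3fwd(); cq=1 asks for a control queue.
        vdev = "virtio_user0,path=%s,cq=1" % path
        return vdev + (",packed_vq=1" if packed else "")

    # l3fwd_virtio_user_vdev("/dev/vhost-net", packed=True)
    #   -> 'virtio_user0,path=/dev/vhost-net,cq=1,packed_vq=1'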



* Re: [dts] [PATCH V1 10/11]virtio_event_idx_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 10/11]virtio_event_idx_interrupt: " Xiao Qimai
@ 2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  2:59   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:22 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: 2020年3月25日 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 10/11]virtio_event_idx_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_virtio_event_idx_interrupt.py | 34
> ++++++++++++++++++++++++---
>  1 file changed, 31 insertions(+), 3 deletions(-)
> 
> diff --git a/tests/TestSuite_virtio_event_idx_interrupt.py
> b/tests/TestSuite_virtio_event_idx_interrupt.py
> index 08ad676..bdcca29 100644
> --- a/tests/TestSuite_virtio_event_idx_interrupt.py
> +++ b/tests/TestSuite_virtio_event_idx_interrupt.py
> @@ -100,7 +100,7 @@ class TestVirtioIdxInterrupt(TestCase):
>          self.vhost.send_expect(command_line, "testpmd> ", 30)
>          self.vhost.send_expect("start", "testpmd> ", 30)
> 
> -    def start_vms(self):
> +    def start_vms(self, packed=False):
>          """
>          start qemus
>          """
> @@ -113,6 +113,8 @@ class TestVirtioIdxInterrupt(TestCase):
>          if self.queues > 1:
>              vm_params['opt_queue'] = self.queues
>              opt_args = opt_args + ",mq=on,vectors=%d" % (2*self.queues +
> 2)
> +        if packed:
> +            opt_args = opt_args + ',packed=on'
>          vm_params['opt_settings'] = opt_args
>          self.vm.set_vm_device(**vm_params)
>          try:
> @@ -224,7 +226,7 @@ class TestVirtioIdxInterrupt(TestCase):
>          self.vm.stop()
>          self.vhost.send_expect("quit", "#", 20)
> 
> -    def test_perf_virito_idx_interrupt_with_virtio_pci_driver_reload(self):
> +    def test_perf_split_ring_virito_pci_driver_reload(self):
>          """
>          virtio-pci driver reload test
>          """
> @@ -237,7 +239,7 @@ class TestVirtioIdxInterrupt(TestCase):
>          self.verify(res is True, "Should increase the wait times of ixia")
>          self.stop_all_apps()
> 
> -    def test_perf_virtio_idx_interrupt_with_multi_queue(self):
> +    def
> test_perf_wake_up_split_ring_virtio_net_cores_with_event_idx_interrupt_mo
> de_16queue(self):
>          """
>          wake up virtio-net cores with event idx interrupt mode 16 queues
> test
>          """
> @@ -250,6 +252,32 @@ class TestVirtioIdxInterrupt(TestCase):
>          self.check_each_queue_has_packets_info_on_vhost()
>          self.stop_all_apps()
> 
> +    def test_perf_packed_ring_virito_pci_driver_reload(self):
> +        """
> +        virtio-pci driver reload test
> +        """
> +        self.queues = 1
> +        self.nb_cores = 1
> +        self.start_vhost_testpmd()
> +        self.start_vms(packed=True)
> +        self.config_virito_net_in_vm()
> +        res =
> self.check_packets_after_reload_virtio_device(reload_times=30)
> +        self.verify(res is True, "Should increase the wait times of ixia")
> +        self.stop_all_apps()
> +
> +    def
> test_perf_wake_up_packed_ring_virtio_net_cores_with_event_idx_interrupt_
> mode_16queue(self):
> +        """
> +        wake up virtio-net cores with event idx interrupt mode 16 queues
> test
> +        """
> +        self.queues = 16
> +        self.nb_cores = 16
> +        self.start_vhost_testpmd()
> +        self.start_vms(packed=True)
> +        self.config_virito_net_in_vm()
> +        self.start_to_send_packets(delay=15)
> +        self.check_each_queue_has_packets_info_on_vhost()
> +        self.stop_all_apps()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1



* Re: [dts] [PATCH V1 05/11]vhost_enqueue_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 05/11]vhost_enqueue_interrupt: " Xiao Qimai
@ 2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:22 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: 2020年3月25日 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 05/11]vhost_enqueue_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_enqueue_interrupt.py | 36
> +++++++++++++++++++++++-------
>  1 file changed, 28 insertions(+), 8 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_enqueue_interrupt.py
> b/tests/TestSuite_vhost_enqueue_interrupt.py
> index 509c7f3..9d0e024 100644
> --- a/tests/TestSuite_vhost_enqueue_interrupt.py
> +++ b/tests/TestSuite_vhost_enqueue_interrupt.py
> @@ -83,14 +83,14 @@ class TestVhostEnqueueInterrupt(TestCase):
>          self.core_list_virtio = core_list[0: self.queues+1]
>          self.core_list_l3fwd = core_list[self.queues+1: need_num]
> 
> -    def lanuch_virtio_user(self):
> +    def lanuch_virtio_user(self, packed=False):
>          """
>          launch virtio-user with server mode
>          """
> -        vdev =
> "--vdev=net_virtio_user0,mac=%s,path=./vhost-net,server=1,queues=%d" %
> (self.vmac, self.queues)
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio, prefix='virtio',
> no_pci=True, ports=[self.pci_info])
> +        vdev =
> "net_virtio_user0,mac=%s,path=./vhost-net,server=1,queues=%d" % (self.vmac,
> self.queues) if not packed else
> "net_virtio_user0,mac=%s,path=./vhost-net,server=1,queues=%d,packed_vq=1"
>  % (self.vmac, self.queues)
> +        eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_virtio,
> + prefix='virtio', no_pci=True, ports=[self.pci_info], vdevs=[vdev])
>          para = " -- -i --rxq=%d --txq=%d --rss-ip" % (self.queues, self.queues)
> -        command_line_client =  self.dut.target + "/app/testpmd " +
> eal_params + vdev + para
> +        command_line_client =  self.dut.target + "/app/testpmd " +
> + eal_params + para
>          self.virtio_user.send_expect(command_line_client, "testpmd> ", 120)
>          self.virtio_user.send_expect("set fwd txonly", "testpmd> ", 20)
> 
> @@ -109,9 +109,9 @@ class TestVhostEnqueueInterrupt(TestCase):
>              self.verify_info.append(info)
> 
>          example_cmd = "./examples/l3fwd-power/build/l3fwd-power "
> -        vdev = [r"'net_vhost0,iface=vhost-net,queues=%d,client=1'" %
> self.queues]
> +        vdev = 'net_vhost0,iface=vhost-net,queues=%d,client=1' %
> + self.queues
>          para = " -- -p 0x1 --parse-ptype 1 --config '%s' " % config_info
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_l3fwd, no_pci=True,
> ports=[self.pci_info], vdevs=vdev)
> +        eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_l3fwd,
> + no_pci=True, ports=[self.pci_info], vdevs=[vdev])
>          command_line_client = example_cmd + eal_params + para
>          self.vhost.get_session_before(timeout=2)
>          self.vhost.send_expect(command_line_client, "POWER", 40) @@
> -156,7 +156,7 @@ class TestVhostEnqueueInterrupt(TestCase):
>          self.dut.close_session(self.vhost)
>          self.dut.close_session(self.virtio_user)
> 
> -    def test_virtio_user_interrupt(self):
> +    def
> test_wake_up_split_ring_vhost_user_core_with_l3fwd_power_sample(self):
>          """
>          Check the virtio-user interrupt can work when use vhost-net as
> backend
>          """
> @@ -166,7 +166,7 @@ class TestVhostEnqueueInterrupt(TestCase):
>          self.lanuch_l3fwd_power()
>          self.send_and_verify()
> 
> -    def test_virtio_user_interrupt_with_multi_queue(self):
> +    def
> test_wake_up_split_ring_vhost_user_core_with_l3fwd_power_sample_when_
> multi_queues_enabled(self):
>          """
>          Check the virtio-user interrupt can work with multi queue
>          """
> @@ -176,6 +176,26 @@ class TestVhostEnqueueInterrupt(TestCase):
>          self.lanuch_l3fwd_power()
>          self.send_and_verify()
> 
> +    def
> test_wake_up_packed_ring_vhost_user_core_with_l3fwd_power_sample(self):
> +        """
> +        Check the virtio-user interrupt can work when use vhost-net as
> backend
> +        """
> +        self.queues = 1
> +        self.get_core_list()
> +        self.lanuch_virtio_user(packed=True)
> +        self.lanuch_l3fwd_power()
> +        self.send_and_verify()
> +
> +    def
> test_wake_up_packed_ring_vhost_user_core_with_l3fwd_power_sample_whe
> n_multi_queues_enabled(self):
> +        """
> +        Check the virtio-user interrupt can work with multi queue
> +        """
> +        self.queues = 4
> +        self.get_core_list()
> +        self.lanuch_virtio_user(packed=True)
> +        self.lanuch_l3fwd_power()
> +        self.send_and_verify()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
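
lanuch_l3fwd_power() above hands l3fwd-power a --config string built from config_info, whose construction sits outside this hunk. l3fwd-power expects one "(port,queue,lcore)" tuple per queue, so a plausible standalone sketch, assuming a single port 0 and one dedicated core per queue, is:

    def build_l3fwd_power_config(queues, cores):
        # One "(port,queue,lcore)" entry per queue on port 0; with
        # queues=4 and cores=['1', '2', '3', '4'] this yields
        # "(0,0,1),(0,1,2),(0,2,3),(0,3,4)".
        return ",".join("(0,%d,%s)" % (q, core)
                        for q, core in zip(range(queues), cores))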



* Re: [dts] [PATCH V1 07/11]vhost_user_live_migration: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 07/11]vhost_user_live_migration: " Xiao Qimai
@ 2020-03-25  8:22   ` Wang, Yinan
  2020-03-31  2:59   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:22 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: 2020年3月25日 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 07/11]vhost_user_live_migration: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_user_live_migration.py | 71
> +++++++++++++++++++++++++---
>  1 file changed, 65 insertions(+), 6 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_user_live_migration.py
> b/tests/TestSuite_vhost_user_live_migration.py
> index 5c5db0d..cb6789c 100644
> --- a/tests/TestSuite_vhost_user_live_migration.py
> +++ b/tests/TestSuite_vhost_user_live_migration.py
> @@ -151,7 +151,7 @@ class TestVhostUserLiveMigration(TestCase):
>          if zero_copy is True:
>              zero_copy_str = ',dequeue-zero-copy=1'
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = [r"'eth_vhost0,iface=%s/vhost-net,queues=%d%s'" %
> (self.base_dir, self.queue_number, zero_copy_str)]
> +        vdev = ['eth_vhost0,iface=%s/vhost-net,queues=%d%s' %
> + (self.base_dir, self.queue_number, zero_copy_str)]
>          para = " -- -i --nb-cores=%d --rxq=%d --txq=%d" %
> (self.queue_number, self.queue_number, self.queue_number)
>          eal_params_first =
> self.dut.create_eal_parameters(cores=self.core_list0, prefix='vhost',
> ports=[self.host_pci_info], vdevs=vdev)
>          eal_params_secondary =
> self.dut.create_eal_parameters(cores=self.core_list1, prefix='vhost',
> ports=[self.backup_pci_info], vdevs=vdev) @@ -166,7 +166,7 @@ class
> TestVhostUserLiveMigration(TestCase):
>          self.backup_dut.send_expect('set fwd %s' % fwd_mode, 'testpmd> ',
> 30)
>          self.backup_dut.send_expect('start', 'testpmd> ', 30)
> 
> -    def setup_vm_env_on_both_dut(self, driver='default'):
> +    def setup_vm_env_on_both_dut(self, driver='default', packed=False):
>          """
>          Create testing environment on Host and Backup
>          """
> @@ -183,6 +183,8 @@ class TestVhostUserLiveMigration(TestCase):
>              if self.queue_number > 1:
>                  vhost_params['opt_queue'] = self.queue_number
>                  opt_params = 'mrg_rxbuf=on,mq=on,vectors=%d' %
> (2*self.queue_number + 2)
> +            if packed:
> +                opt_params = opt_params + ',packed=on'
>              vhost_params['opt_settings'] = opt_params
>              self.host_vm.set_vm_device(**vhost_params)
> 
> @@ -385,7 +387,7 @@ class TestVhostUserLiveMigration(TestCase):
>          # make sure still can receive packets
>          verify_fun(self.vm_dut_backup)
> 
> -    def test_migrate_with_virtio_net(self):
> +    def test_migrate_with_split_ring_virtio_net(self):
>          """
>          Verify migrate virtIO device from host to backup host,
>          Verify before/in/after migration, device with kernel driver can
> receive packets @@ -402,7 +404,7 @@ class
> TestVhostUserLiveMigration(TestCase):
> 
>          self.send_and_verify(self.verify_kernel)
> 
> -    def test_migrete_with_vritio_net_with_multi_queue(self):
> +    def test_adjust_split_ring_virtio_net_queue_numbers_while_migreting_with_virtio_net(self):
>          self.queue_number = 4
>          self.launch_testpmd_as_vhost_on_both_dut()
>          self.start_testpmd_with_fwd_mode_on_both_dut()
> @@ -414,7 +416,7 @@ class TestVhostUserLiveMigration(TestCase):
> 
>          self.send_and_verify(self.verify_kernel, True)
> 
> -    def test_migrate_with_virtio_pmd(self):
> +    def test_migrate_with_split_ring_virtio_pmd(self):
>          self.queue_number = 1
>          self.launch_testpmd_as_vhost_on_both_dut()
>          self.start_testpmd_with_fwd_mode_on_both_dut()
> @@ -426,7 +428,7 @@ class TestVhostUserLiveMigration(TestCase):
> 
>          self.send_and_verify(self.verify_dpdk)
> 
> -    def test_migrate_with_zero_copy_virtio_pmd(self):
> +    def test_migrate_with_split_ring_virtio_pmd_zero_copy(self):
>          self.queue_number = 1
>          zero_copy = True
>          # start testpmd and qemu on dut
> @@ -442,6 +444,63 @@ class TestVhostUserLiveMigration(TestCase):
> 
>          self.send_and_verify(self.verify_dpdk)
> 
> +    def test_migrate_with_packed_ring_virtio_pmd(self):
> +        self.queue_number = 1
> +        self.launch_testpmd_as_vhost_on_both_dut()
> +        self.start_testpmd_with_fwd_mode_on_both_dut()
> +        self.setup_vm_env_on_both_dut(packed=True)
> +
> +        # bind virtio-net to igb_uio
> +        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
> +        self.start_testpmd_on_vm(self.vm_dut_host)
> +
> +        self.send_and_verify(self.verify_dpdk)
> +
> +    def test_migrate_with_packed_ring_virtio_pmd_zero_copy(self):
> +        self.queue_number = 1
> +        zero_copy = True
> +        # start testpmd and qemu on dut
> +        # after qemu start ok, then send 'start' command to testpmd
> +        # if send 'start' command before start qemu, maybe qemu will start
> failed
> +        self.launch_testpmd_as_vhost_on_both_dut(zero_copy)
> +        self.setup_vm_env_on_both_dut(packed=True)
> +        self.start_testpmd_with_fwd_mode_on_both_dut()
> +
> +        # bind virtio-net to igb_uio
> +        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
> +        self.start_testpmd_on_vm(self.vm_dut_host)
> +
> +        self.send_and_verify(self.verify_dpdk)
> +
> +    def test_migrate_with_packed_ring_virtio_net(self):
> +        """
> +        Verify migrate virtIO device from host to backup host,
> +        Verify before/in/after migration, device with kernel driver can
> receive packets
> +        """
> +        self.queue_number = 1
> +        self.launch_testpmd_as_vhost_on_both_dut()
> +        self.start_testpmd_with_fwd_mode_on_both_dut()
> +        self.setup_vm_env_on_both_dut(packed=True)
> +
> +        # bind virtio-net back to virtio-pci
> +        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
> +        # start screen and tcpdump on vm
> +        self.start_tcpdump_on_vm(self.vm_dut_host)
> +
> +        self.send_and_verify(self.verify_kernel)
> +
> +    def test_adjust_packed_ring_virtio_net_queue_numbers_while_migreting_with_virtio_net(self):
> +        self.queue_number = 4
> +        self.launch_testpmd_as_vhost_on_both_dut()
> +        self.start_testpmd_with_fwd_mode_on_both_dut()
> +        self.setup_vm_env_on_both_dut(packed=True)
> +
> +        # bind virtio-net back to virtio-pci
> +        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
> +        self.start_tcpdump_on_vm(self.vm_dut_host)
> +
> +        self.send_and_verify(self.verify_kernel, True)
> +
>      def tear_down(self):
>          self.destroy_vm_env()
>          # stop send packet on tester
> --
> 1.8.3.1
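
The packed-ring cases added above differ from the split-ring ones only in how the QEMU device options are built: setup_vm_env_on_both_dut(packed=True) appends packed=on to opt_settings. A minimal standalone sketch of that pattern (option names follow the quoted diff; the helper itself is illustrative, not part of the suite):

    def build_virtio_net_opt_settings(queue_number, packed=False):
        # Base option used for the virtio-net-pci device in the VM config.
        opt_params = 'mrg_rxbuf=on'
        if queue_number > 1:
            # Multi-queue needs mq=on and enough MSI-X vectors (2 * queues + 2).
            opt_params += ',mq=on,vectors=%d' % (2 * queue_number + 2)
        if packed:
            # The packed ring layout is selected purely through this device option.
            opt_params += ',packed=on'
        return opt_params

    print(build_virtio_net_opt_settings(1))               # mrg_rxbuf=on
    print(build_virtio_net_opt_settings(4, packed=True))  # mrg_rxbuf=on,mq=on,vectors=10,packed=on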


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 03/11]pvp_virtio_user_2M_hugepages: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 03/11]pvp_virtio_user_2M_hugepages: " Xiao Qimai
@ 2020-03-25  8:23   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:23 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: March 25, 2020 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 03/11]pvp_virtio_user_2M_hugepages: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_pvp_virtio_user_2M_hugepages.py | 22
> ++++++++++++++++------
>  1 file changed, 16 insertions(+), 6 deletions(-)
> 
> diff --git a/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
> b/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
> index 9d2eaed..ac7187c 100644
> --- a/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
> +++ b/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
> @@ -128,20 +128,20 @@ class TestPVPVirtioWith2Mhuge(TestCase):
>          start testpmd on vhost
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = [r"'net_vhost0,iface=vhost-net,queues=1'"]
> +        vdev = ["net_vhost0,iface=vhost-net,queues=1"]
>          eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_vhost_user, prefix='vhost',
> ports=[self.pci_info], vdevs=vdev)
>          command_line_client = testcmd + eal_params + " -- -i"
>          self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
>          self.vhost_user.send_expect("start", "testpmd> ", 120)
> 
> -    def start_testpmd_as_virtio(self):
> +    def start_testpmd_as_virtio(self, packed=False):
>          """
>          start testpmd on virtio
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = " --single-file-segments
> --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1 --
> -i"
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio_user, no_pci=True,
> prefix='virtio-user', ports=[self.pci_info])
> -        command_line_user = testcmd + eal_params + vdev
> +        vdev = 'net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1' if not packed else 'net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1,packed_vq=1'
> +        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio_user, no_pci=True,
> prefix='virtio-user', ports=[self.pci_info], vdevs=[vdev])
> +        command_line_user = testcmd + eal_params + ' --single-file-segments -- -i'
>          self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
>          self.virtio_user.send_expect("start", "testpmd> ", 120)
> 
> @@ -154,7 +154,7 @@ class TestPVPVirtioWith2Mhuge(TestCase):
>          self.dut.close_session(self.vhost_user)
>          self.dut.close_session(self.virtio_user)
> 
> -    def test_perf_pvp_virtio_user_with_2M_hugepages(self):
> +    def test_perf_pvp_virtio_user_split_ring_2M_hugepages(self):
>          """
>          Basic test for virtio-user 2M hugepage
>          """
> @@ -164,6 +164,16 @@ class TestPVPVirtioWith2Mhuge(TestCase):
>          self.result_table_print()
>          self.close_all_apps()
> 
> +    def test_perf_pvp_virtio_user_packed_ring_2M_hugepages(self):
> +        """
> +        Basic test for virtio-user 2M hugepage
> +        """
> +        self.start_testpmd_as_vhost()
> +        self.start_testpmd_as_virtio(packed=True)
> +        self.send_and_verify()
> +        self.result_table_print()
> +        self.close_all_apps()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
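
On the virtio-user side the change is the same idea at the vdev level: packed_vq=1 is appended to the device string and the list is handed to create_eal_parameters(). A rough sketch, with a stand-in for the DTS helper (the stub only renders --vdev arguments; the real helper also handles cores, prefix and ports):

    def build_virtio_user_vdev(packed=False):
        base = 'net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1'
        # Packed ring is requested through the extra packed_vq=1 device argument.
        return base if not packed else base + ',packed_vq=1'

    def render_eal_vdevs(vdevs):
        # Stand-in for self.dut.create_eal_parameters(..., vdevs=[vdev]).
        return ' '.join('--vdev=%s' % v for v in vdevs)

    eal = render_eal_vdevs([build_virtio_user_vdev(packed=True)])
    print('testpmd ' + eal + ' --single-file-segments -- -i')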


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 02/11]loopback_multi_queues: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 02/11]loopback_multi_queues: " Xiao Qimai
@ 2020-03-25  8:23   ` Wang, Yinan
  2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Wang, Yinan @ 2020-03-25  8:23 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Acked-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao Qimai
> Sent: March 25, 2020 16:11
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 02/11]loopback_multi_queues: update script according
> to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_loopback_multi_queues.py | 55
> +++++++++++++++++++++++++++-----
>  1 file changed, 47 insertions(+), 8 deletions(-)
> 
> diff --git a/tests/TestSuite_loopback_multi_queues.py
> b/tests/TestSuite_loopback_multi_queues.py
> index 589bf06..174ce01 100644
> --- a/tests/TestSuite_loopback_multi_queues.py
> +++ b/tests/TestSuite_loopback_multi_queues.py
> @@ -98,7 +98,7 @@ class TestLoopbackMultiQueues(TestCase):
>          start testpmd on virtio
>          """
>          eal_param = self.dut.create_eal_parameters(cores=self.core_list_user, prefix='virtio', no_pci=True, vdevs=['net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,queues=%d,%s' % (self.queue_number, args["version"])])
> -        command_line_user = self.dut.target + "/app/testpmd " + eal_param + " -- -i %s --rss-ip --nb-cores=%d --rxq=%d --txq=%d --txd=1024 --rxd=1024" % (args["path"], self.nb_cores, self.queue_number, self.queue_number)
> +        command_line_user = self.dut.target + "/app/testpmd " + eal_param + " -- -i %s --nb-cores=%d --rxq=%d --txq=%d --txd=1024 --rxd=1024" % (args["path"], self.nb_cores, self.queue_number, self.queue_number)
>          self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
>          self.virtio_user.send_expect("set fwd mac", "testpmd> ", 120)
>          self.virtio_user.send_expect("start", "testpmd> ", 120) @@ -162,6
> +162,7 @@ class TestLoopbackMultiQueues(TestCase):
>              if self.queue_number > 1:
>                  self.check_packets_of_each_queue(frame_size)
> 
> +
>      def verify_liner_for_multi_queue(self):
>          """
>          verify the Mpps of 8 queues is eight times of 1 queue
> @@ -191,7 +192,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP virtio 1.1 Mergeable Path.
>          """
>          virtio_pmd_arg = {"version":
> "in_order=0,packed_vq=1,mrg_rxbuf=1",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -209,7 +210,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP virtio1.1 Normal Path.
>          """
>          virtio_pmd_arg = {"version":
> "in_order=0,packed_vq=1,mrg_rxbuf=0",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -227,7 +228,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP In_order mergeable Path.
>          """
>          virtio_pmd_arg = {"version":
> "packed_vq=0,in_order=1,mrg_rxbuf=1",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -245,7 +246,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP In_order no_mergeable Path.
>          """
>          virtio_pmd_arg = {"version":
> "packed_vq=0,in_order=1,mrg_rxbuf=0",
> -                        "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                        "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -263,12 +264,14 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP Mergeable Path.
>          """
>          virtio_pmd_arg = {"version":
> "packed_vq=0,in_order=0,mrg_rxbuf=1",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
>              self.get_core_mask()
>              self.start_vhost_testpmd()
> +            if self.queue_number == 8:
> +                virtio_pmd_arg["path"] = "--enable-hw-vlan-strip"
>              self.start_virtio_testpmd(virtio_pmd_arg)
>              self.send_and_verify("virito mergeable")
>              self.close_all_testpmd()
> @@ -281,7 +284,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP Normal Path.
>          """
>          virtio_pmd_arg = {"version":
> "packed_vq=0,in_order=0,mrg_rxbuf=0",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": "--enable-hw-vlan-strip"}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -299,7 +302,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP Vector_RX Path
>          """
>          virtio_pmd_arg = {"version":
> "packed_vq=0,in_order=0,mrg_rxbuf=0",
> -                            "path": "--tx-offloads=0x0"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -312,6 +315,42 @@ class TestLoopbackMultiQueues(TestCase):
>          self.result_table_print()
>          self.verify_liner_for_multi_queue()
> 
> +    def test_loopback_with_virtio11_inorder_mergeable_path_multi_queue(self):
> +        """
> +        performance for Vhost PVP Vector_RX Path
> +        """
> +        virtio_pmd_arg = {"version":
> "packed_vq=1,mrg_rxbuf=1,in_order=1",
> +                            "path": ""}
> +        for i in self.verify_queue:
> +            self.nb_cores = i
> +            self.queue_number = i
> +            self.get_core_mask()
> +            self.start_vhost_testpmd()
> +            self.start_virtio_testpmd(virtio_pmd_arg)
> +            self.send_and_verify("virtio 1.1 inorder mergeable")
> +            self.close_all_testpmd()
> +
> +        self.result_table_print()
> +        self.verify_liner_for_multi_queue()
> +
> +    def test_loopback_with_virtio11_inorder_nonmergeable_path_multi_queue(self):
> +        """
> +        performance for Vhost PVP Vector_RX Path
> +        """
> +        virtio_pmd_arg = {"version":
> "packed_vq=1,mrg_rxbuf=0,in_order=1",
> +                            "path": ""}
> +        for i in self.verify_queue:
> +            self.nb_cores = i
> +            self.queue_number = i
> +            self.get_core_mask()
> +            self.start_vhost_testpmd()
> +            self.start_virtio_testpmd(virtio_pmd_arg)
> +            self.send_and_verify("virtio 1.1 inorder non-mergeable")
> +            self.close_all_testpmd()
> +
> +        self.result_table_print()
> +        self.verify_liner_for_multi_queue()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
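
All of the new multi-queue cases reuse the same loop: pick a virtio "version" string, sweep the queue counts in verify_queue, then check that throughput scales with the queue number. A condensed sketch of the final scaling check (the 10% tolerance is an example value, not the suite's actual threshold):

    def check_linear_scaling(mpps_by_queue, tolerance=0.1):
        # In the spirit of verify_liner_for_multi_queue(): the 8-queue result
        # should be close to eight times the 1-queue result.
        expected = mpps_by_queue[1] * 8
        return abs(mpps_by_queue[8] - expected) <= expected * tolerance

    print(check_linear_scaling({1: 1.0, 8: 7.6}))  # True within the 10% tolerance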


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 10/11]virtio_event_idx_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 10/11]virtio_event_idx_interrupt: " Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
@ 2020-03-31  2:59   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  2:59 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 10/11]virtio_event_idx_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_virtio_event_idx_interrupt.py | 34
> ++++++++++++++++++++++++---
>  1 file changed, 31 insertions(+), 3 deletions(-)
> 
> diff --git a/tests/TestSuite_virtio_event_idx_interrupt.py
> b/tests/TestSuite_virtio_event_idx_interrupt.py
> index 08ad676..bdcca29 100644
> --- a/tests/TestSuite_virtio_event_idx_interrupt.py
> +++ b/tests/TestSuite_virtio_event_idx_interrupt.py
> @@ -100,7 +100,7 @@ class TestVirtioIdxInterrupt(TestCase):
>          self.vhost.send_expect(command_line, "testpmd> ", 30)
>          self.vhost.send_expect("start", "testpmd> ", 30)
> 
> -    def start_vms(self):
> +    def start_vms(self, packed=False):
>          """
>          start qemus
>          """
> @@ -113,6 +113,8 @@ class TestVirtioIdxInterrupt(TestCase):
>          if self.queues > 1:
>              vm_params['opt_queue'] = self.queues
>              opt_args = opt_args + ",mq=on,vectors=%d" % (2*self.queues + 2)
> +        if packed:
> +            opt_args = opt_args + ',packed=on'
>          vm_params['opt_settings'] = opt_args
>          self.vm.set_vm_device(**vm_params)
>          try:
> @@ -224,7 +226,7 @@ class TestVirtioIdxInterrupt(TestCase):
>          self.vm.stop()
>          self.vhost.send_expect("quit", "#", 20)
> 
> -    def test_perf_virito_idx_interrupt_with_virtio_pci_driver_reload(self):
> +    def test_perf_split_ring_virito_pci_driver_reload(self):
>          """
>          virtio-pci driver reload test
>          """
> @@ -237,7 +239,7 @@ class TestVirtioIdxInterrupt(TestCase):
>          self.verify(res is True, "Should increase the wait times of ixia")
>          self.stop_all_apps()
> 
> -    def test_perf_virtio_idx_interrupt_with_multi_queue(self):
> +    def test_perf_wake_up_split_ring_virtio_net_cores_with_event_idx_interrupt_mode_16queue(self):
>          """
>          wake up virtio-net cores with event idx interrupt mode 16 queues test
>          """
> @@ -250,6 +252,32 @@ class TestVirtioIdxInterrupt(TestCase):
>          self.check_each_queue_has_packets_info_on_vhost()
>          self.stop_all_apps()
> 
> +    def test_perf_packed_ring_virito_pci_driver_reload(self):
> +        """
> +        virtio-pci driver reload test
> +        """
> +        self.queues = 1
> +        self.nb_cores = 1
> +        self.start_vhost_testpmd()
> +        self.start_vms(packed=True)
> +        self.config_virito_net_in_vm()
> +        res = self.check_packets_after_reload_virtio_device(reload_times=30)
> +        self.verify(res is True, "Should increase the wait times of ixia")
> +        self.stop_all_apps()
> +
> +    def test_perf_wake_up_packed_ring_virtio_net_cores_with_event_idx_interrupt_mode_16queue(self):
> +        """
> +        wake up virtio-net cores with event idx interrupt mode 16 queues test
> +        """
> +        self.queues = 16
> +        self.nb_cores = 16
> +        self.start_vhost_testpmd()
> +        self.start_vms(packed=True)
> +        self.config_virito_net_in_vm()
> +        self.start_to_send_packets(delay=15)
> +        self.check_each_queue_has_packets_info_on_vhost()
> +        self.stop_all_apps()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
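
A small arithmetic point worth double checking in the 16-queue case: the vectors value in opt_settings has to cover every rx/tx queue plus the control and config interrupts, which is where the 2*queues+2 formula comes from. A quick worked example (the base of the option string is abbreviated here):

    queues = 16
    vectors = 2 * queues + 2   # one vector per rx and tx queue, plus control and config
    opt_args = "mq=on,vectors=%d,packed=on" % vectors
    print(opt_args)  # mq=on,vectors=34,packed=on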


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 11/11]virtio_pvp_regression: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 11/11]virtio_pvp_regression: " Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
@ 2020-03-31  2:59   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  2:59 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 11/11]virtio_pvp_regression: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_virtio_pvp_regression.py | 34
> ++++++++++++++++++++++++++------
>  1 file changed, 28 insertions(+), 6 deletions(-)
> 
> diff --git a/tests/TestSuite_virtio_pvp_regression.py
> b/tests/TestSuite_virtio_pvp_regression.py
> index 97e522c..42aceae 100644
> --- a/tests/TestSuite_virtio_pvp_regression.py
> +++ b/tests/TestSuite_virtio_pvp_regression.py
> @@ -198,7 +198,7 @@ class TestVirtioPVPRegression(TestCase):
>                  if 'cpupin' in list(self.vm.params[i]['cpu'][0].keys()):
>                      self.vm.params[i]['cpu'][0].pop('cpupin')
> 
> -    def start_vm(self, qemu_path, qemu_version, modem, virtio_path):
> +    def start_vm(self, qemu_path, qemu_version, modem, virtio_path,
> packed=False):
>          """
>          start vm
>          """
> @@ -227,6 +227,8 @@ class TestVirtioPVPRegression(TestCase):
>              opt_args = 'disable-modern=false,' + opt_args
>          elif(modem == 0):
>              opt_args = 'disable-modern=true,' + opt_args
> +        if packed:
> +            opt_args = opt_args + ',packed=on'
>          vm_params['opt_settings'] = opt_args
>          self.vm.set_vm_device(**vm_params)
>          self.vm.load_config()
> @@ -344,7 +346,7 @@ class TestVirtioPVPRegression(TestCase):
>          self.dut.send_expect('killall -s INT qemu-system-x86_64', '# ')
>          self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
> 
> -    def pvp_regression_run(self, case_info, modem, virtio_path):
> +    def pvp_regression_run(self, case_info, modem, virtio_path, packed=False):
>          """
>          run different qemu verssion on different virtio path of pvp regression
>          modem = 0, start vm as virtio 0.95
> @@ -357,7 +359,7 @@ class TestVirtioPVPRegression(TestCase):
>              version = self.qemu_list[i]["version"]
>              self.start_testpmd_as_vhost()
>              # use different modem and different path to start vm
> -            self.start_vm(path, version, modem, virtio_path)
> +            self.start_vm(path, version, modem, virtio_path,
> + packed=packed)
>              self.start_testpmd_in_vm(virtio_path)
>              self.logger.info("now testing the qemu path of %s" % path)
>              time.sleep(5)
> @@ -370,7 +372,7 @@ class TestVirtioPVPRegression(TestCase):
> 
>              self.logger.info('now reconnect from vm')
>              self.dut.send_expect('killall -s INT qemu-system-x86_64', '# ')
> -            self.start_vm(path, version, modem, virtio_path)
> +            self.start_vm(path, version, modem, virtio_path,
> + packed=packed)
>              self.start_testpmd_in_vm(virtio_path)
>              self.send_verify(case_info, version, "reconnect from vm")
> 
> @@ -397,7 +399,7 @@ class TestVirtioPVPRegression(TestCase):
>          virtio_path = 'mergeable'
>          self.pvp_regression_run(case_info, modem, virtio_path)
> 
> -    def test_perf_pvp_regression_normal_path(self):
> +    def test_perf_pvp_regression_non_mergeable_path(self):
>          """
>          Test the performance of one vm with virtio 0.95 on normal path
>          diff qemu + multi queue + reconnect
> @@ -407,7 +409,7 @@ class TestVirtioPVPRegression(TestCase):
>          virtio_path = 'normal'
>          self.pvp_regression_run(case_info, modem, virtio_path)
> 
> -    def test_perf_pvp_regression_modern_normal_path(self):
> +    def test_perf_pvp_regression_modern_non_mergeable_path(self):
>          """
>          Test the performance of one vm with virtio 1.0 on normal path
>          diff qemu + multi queue + reconnect
> @@ -437,6 +439,26 @@ class TestVirtioPVPRegression(TestCase):
>          virtio_path = 'vector_rx'
>          self.pvp_regression_run(case_info, modem, virtio_path)
> 
> +    def test_perf_pvp_with_virtio11_mergeable_path(self):
> +        """
> +        Test the performance of one vm with virtio 1.1 on mergeable path
> +        diff qemu + multi queue + reconnect
> +        """
> +        case_info = 'virtio-1.1 mergeable'
> +        modem = 1
> +        virtio_path = 'mergeable'
> +        self.pvp_regression_run(case_info, modem, virtio_path,
> + packed=True)
> +
> +    def test_perf_pvp_with_virtio11_non_mergeable_path(self):
> +        """
> +        Test the performance of one vm with virtio 1.1 on mergeable path
> +        diff qemu + multi queue + reconnect
> +        """
> +        case_info = 'virtio-1.1 normal'
> +        modem = 1
> +        virtio_path = 'normal'
> +        self.pvp_regression_run(case_info, modem, virtio_path,
> + packed=True)
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
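
The regression runner now combines three knobs per run: the QEMU binary under test, the modern/legacy switch (modem), and the ring layout (packed). A compact sketch of how those map onto the device option string (option names as in the quoted diff; the surrounding reconnect logic is omitted):

    def build_opt_settings(modem, packed, base='mrg_rxbuf=on'):
        # modem=1 starts the VM as virtio 1.0 (modern), modem=0 as virtio 0.95 (legacy).
        opt = ('disable-modern=false,' if modem == 1 else 'disable-modern=true,') + base
        if packed:
            # The packed ring layout is requested with packed=on.
            opt += ',packed=on'
        return opt

    print(build_opt_settings(modem=1, packed=True))
    # disable-modern=false,mrg_rxbuf=on,packed=on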


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 07/11]vhost_user_live_migration: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 07/11]vhost_user_live_migration: " Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
@ 2020-03-31  2:59   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  2:59 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 07/11]vhost_user_live_migration: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_user_live_migration.py | 71
> +++++++++++++++++++++++++---
>  1 file changed, 65 insertions(+), 6 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_user_live_migration.py
> b/tests/TestSuite_vhost_user_live_migration.py
> index 5c5db0d..cb6789c 100644
> --- a/tests/TestSuite_vhost_user_live_migration.py
> +++ b/tests/TestSuite_vhost_user_live_migration.py
> @@ -151,7 +151,7 @@ class TestVhostUserLiveMigration(TestCase):
>          if zero_copy is True:
>              zero_copy_str = ',dequeue-zero-copy=1'
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = [r"'eth_vhost0,iface=%s/vhost-net,queues=%d%s'" %
> (self.base_dir, self.queue_number, zero_copy_str)]
> +        vdev = ['eth_vhost0,iface=%s/vhost-net,queues=%d%s' %
> + (self.base_dir, self.queue_number, zero_copy_str)]
>          para = " -- -i --nb-cores=%d --rxq=%d --txq=%d" % (self.queue_number,
> self.queue_number, self.queue_number)
>          eal_params_first = self.dut.create_eal_parameters(cores=self.core_list0,
> prefix='vhost', ports=[self.host_pci_info], vdevs=vdev)
>          eal_params_secondary =
> self.dut.create_eal_parameters(cores=self.core_list1, prefix='vhost',
> ports=[self.backup_pci_info], vdevs=vdev)
> @@ -166,7 +166,7 @@ class TestVhostUserLiveMigration(TestCase):
>          self.backup_dut.send_expect('set fwd %s' % fwd_mode, 'testpmd> ', 30)
>          self.backup_dut.send_expect('start', 'testpmd> ', 30)
> 
> -    def setup_vm_env_on_both_dut(self, driver='default'):
> +    def setup_vm_env_on_both_dut(self, driver='default', packed=False):
>          """
>          Create testing environment on Host and Backup
>          """
> @@ -183,6 +183,8 @@ class TestVhostUserLiveMigration(TestCase):
>              if self.queue_number > 1:
>                  vhost_params['opt_queue'] = self.queue_number
>                  opt_params = 'mrg_rxbuf=on,mq=on,vectors=%d' %
> (2*self.queue_number + 2)
> +            if packed:
> +                opt_params = opt_params + ',packed=on'
>              vhost_params['opt_settings'] = opt_params
>              self.host_vm.set_vm_device(**vhost_params)
> 
> @@ -385,7 +387,7 @@ class TestVhostUserLiveMigration(TestCase):
>          # make sure still can receive packets
>          verify_fun(self.vm_dut_backup)
> 
> -    def test_migrate_with_virtio_net(self):
> +    def test_migrate_with_split_ring_virtio_net(self):
>          """
>          Verify migrate virtIO device from host to backup host,
>          Verify before/in/after migration, device with kernel driver can receive packets
> @@ -402,7 +404,7 @@ class TestVhostUserLiveMigration(TestCase):
> 
>          self.send_and_verify(self.verify_kernel)
> 
> -    def test_migrete_with_vritio_net_with_multi_queue(self):
> +    def test_adjust_split_ring_virtio_net_queue_numbers_while_migreting_with_virtio_net(self):
>          self.queue_number = 4
>          self.launch_testpmd_as_vhost_on_both_dut()
>          self.start_testpmd_with_fwd_mode_on_both_dut()
> @@ -414,7 +416,7 @@ class TestVhostUserLiveMigration(TestCase):
> 
>          self.send_and_verify(self.verify_kernel, True)
> 
> -    def test_migrate_with_virtio_pmd(self):
> +    def test_migrate_with_split_ring_virtio_pmd(self):
>          self.queue_number = 1
>          self.launch_testpmd_as_vhost_on_both_dut()
>          self.start_testpmd_with_fwd_mode_on_both_dut()
> @@ -426,7 +428,7 @@ class TestVhostUserLiveMigration(TestCase):
> 
>          self.send_and_verify(self.verify_dpdk)
> 
> -    def test_migrate_with_zero_copy_virtio_pmd(self):
> +    def test_migrate_with_split_ring_virtio_pmd_zero_copy(self):
>          self.queue_number = 1
>          zero_copy = True
>          # start testpmd and qemu on dut
> @@ -442,6 +444,63 @@ class TestVhostUserLiveMigration(TestCase):
> 
>          self.send_and_verify(self.verify_dpdk)
> 
> +    def test_migrate_with_packed_ring_virtio_pmd(self):
> +        self.queue_number = 1
> +        self.launch_testpmd_as_vhost_on_both_dut()
> +        self.start_testpmd_with_fwd_mode_on_both_dut()
> +        self.setup_vm_env_on_both_dut(packed=True)
> +
> +        # bind virtio-net to igb_uio
> +        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
> +        self.start_testpmd_on_vm(self.vm_dut_host)
> +
> +        self.send_and_verify(self.verify_dpdk)
> +
> +    def test_migrate_with_packed_ring_virtio_pmd_zero_copy(self):
> +        self.queue_number = 1
> +        zero_copy = True
> +        # start testpmd and qemu on dut
> +        # after qemu start ok, then send 'start' command to testpmd
> +        # if send 'start' command before start qemu, maybe qemu will start
> failed
> +        self.launch_testpmd_as_vhost_on_both_dut(zero_copy)
> +        self.setup_vm_env_on_both_dut(packed=True)
> +        self.start_testpmd_with_fwd_mode_on_both_dut()
> +
> +        # bind virtio-net to igb_uio
> +        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
> +        self.start_testpmd_on_vm(self.vm_dut_host)
> +
> +        self.send_and_verify(self.verify_dpdk)
> +
> +    def test_migrate_with_packed_ring_virtio_net(self):
> +        """
> +        Verify migrate virtIO device from host to backup host,
> +        Verify before/in/after migration, device with kernel driver can receive
> packets
> +        """
> +        self.queue_number = 1
> +        self.launch_testpmd_as_vhost_on_both_dut()
> +        self.start_testpmd_with_fwd_mode_on_both_dut()
> +        self.setup_vm_env_on_both_dut(packed=True)
> +
> +        # bind virtio-net back to virtio-pci
> +        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
> +        # start screen and tcpdump on vm
> +        self.start_tcpdump_on_vm(self.vm_dut_host)
> +
> +        self.send_and_verify(self.verify_kernel)
> +
> +    def test_adjust_packed_ring_virtio_net_queue_numbers_while_migreting_with_virtio_net(self):
> +        self.queue_number = 4
> +        self.launch_testpmd_as_vhost_on_both_dut()
> +        self.start_testpmd_with_fwd_mode_on_both_dut()
> +        self.setup_vm_env_on_both_dut(packed=True)
> +
> +        # bind virtio-net back to virtio-pci
> +        self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
> +        self.start_tcpdump_on_vm(self.vm_dut_host)
> +
> +        self.send_and_verify(self.verify_kernel, True)
> +
>      def tear_down(self):
>          self.destroy_vm_env()
>          # stop send packet on tester
> --
> 1.8.3.1


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 06/11]vhost_event_idx_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 06/11]vhost_event_idx_interrupt: " Xiao Qimai
  2020-03-25  8:21   ` Wang, Yinan
@ 2020-03-31  2:59   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  2:59 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 06/11]vhost_event_idx_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_event_idx_interrupt.py | 52
> +++++++++++++++++++++++++---
>  1 file changed, 47 insertions(+), 5 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_event_idx_interrupt.py
> b/tests/TestSuite_vhost_event_idx_interrupt.py
> index fb7da60..5a9f932 100644
> --- a/tests/TestSuite_vhost_event_idx_interrupt.py
> +++ b/tests/TestSuite_vhost_event_idx_interrupt.py
> @@ -161,7 +161,7 @@ class TestVhostEventIdxInterrupt(TestCase):
>                      'This qemu version should greater than 2.7 ' + \
>                      'in this suite, please config it in vhost_sample.cfg file')
> 
> -    def start_vms(self, vm_num=1):
> +    def start_vms(self, vm_num=1, packed=False):
>          """
>          start qemus
>          """
> @@ -178,6 +178,8 @@ class TestVhostEventIdxInterrupt(TestCase):
>                  opt_args = "csum=on,mq=on,vectors=%d" % (2*self.queues + 2)
>              else:
>                  opt_args = "csum=on"
> +            if packed:
> +                opt_args = opt_args + ',packed=on'
>              vm_params['opt_settings'] = opt_args
>              vm_info.set_vm_device(**vm_params)
>              self.set_vm_cpu_number(vm_info) @@ -250,9 +252,9 @@ class
> TestVhostEventIdxInterrupt(TestCase):
>          """
>          for i in range(len(self.vm)):
>              self.vm[i].stop()
> -        self.vhost.send_expect("^c", "#", 10)
> +        self.dut.send_expect("killall l3fwd-power", "#", timeout=2)
> 
> -    def test_vhost_idx_interrupt(self):
> +    def test_wake_up_split_ring_vhost_user_core_with_event_idx_interrupt(self):
>          """
>          wake up vhost-user core with l3fwd-power sample
>          """
> @@ -265,7 +267,7 @@ class TestVhostEventIdxInterrupt(TestCase):
>          self.send_and_verify()
>          self.stop_all_apps()
> 
> -    def test_vhost_idx_interrupt_with_multi_queue(self):
> +    def test_wake_up_split_ring_vhost_user_cores_with_event_idx_interrupt_mode_16_queues(self):
>          """
>          wake up vhost-user core with l3fwd-power sample when multi queues
> are enabled
>          """
> @@ -279,7 +281,7 @@ class TestVhostEventIdxInterrupt(TestCase):
>          self.send_and_verify()
>          self.stop_all_apps()
> 
> -    def test_vhost_idx_interrupt_with_multi_vms(self):
> +    def test_wake_up_split_ring_vhost_user_cores_by_multi_virtio_net_in_vms_with_event_idx_interrupt(self):
>          """
>          wake up vhost-user cores with l3fwd-power sample and multi VMs
>          """
> @@ -292,6 +294,46 @@ class TestVhostEventIdxInterrupt(TestCase):
>          self.send_and_verify()
>          self.stop_all_apps()
> 
> +    def test_wake_up_packed_ring_vhost_user_core_with_event_idx_interrupt(self):
> +        """
> +        wake up vhost-user core with l3fwd-power sample
> +        """
> +        self.vm_num = 1
> +        self.queues = 1
> +        self.get_core_mask()
> +        self.lanuch_l3fwd_power()
> +        self.start_vms(vm_num=self.vm_num, packed=True)
> +        self.relanuch_l3fwd_power()
> +        self.send_and_verify()
> +        self.stop_all_apps()
> +
> +    def test_wake_up_packed_ring_vhost_user_cores_with_event_idx_interrupt_mode_16_queues(self):
> +        """
> +        wake up vhost-user core with l3fwd-power sample when multi queues
> are enabled
> +        """
> +        self.vm_num = 1
> +        self.queues = 16
> +        self.get_core_mask()
> +        self.lanuch_l3fwd_power()
> +        self.start_vms(vm_num=self.vm_num, packed=True)
> +        self.relanuch_l3fwd_power()
> +        self.config_virito_net_in_vm()
> +        self.send_and_verify()
> +        self.stop_all_apps()
> +
> +    def test_wake_up_packed_ring_vhost_user_cores_by_multi_virtio_net_in_vms_with_event_idx_interrupt(self):
> +        """
> +        wake up vhost-user cores with l3fwd-power sample and multi VMs
> +        """
> +        self.vm_num = 2
> +        self.queues = 1
> +        self.get_core_mask()
> +        self.lanuch_l3fwd_power()
> +        self.start_vms(vm_num=self.vm_num, packed=True)
> +        self.relanuch_l3fwd_power()
> +        self.send_and_verify()
> +        self.stop_all_apps()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
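
Two separate changes are folded in here: teardown now kills l3fwd-power by process name instead of sending ^C to the vhost session, and every VM definition can request the packed ring. A sketch of the per-VM option handling (the csum/mq defaults follow the quoted diff; the VM start loop is trimmed):

    def vm_opt_settings(queues, packed=False):
        if queues > 1:
            opt_args = "csum=on,mq=on,vectors=%d" % (2 * queues + 2)
        else:
            opt_args = "csum=on"
        if packed:
            opt_args += ',packed=on'
        return opt_args

    # Example: two single-queue VMs for the multi-VM wake-up case.
    print([vm_opt_settings(1, packed=True) for _ in range(2)])
    # ['csum=on,packed=on', 'csum=on,packed=on']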


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 09/11]vhost_virtio_user_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 09/11]vhost_virtio_user_interrupt: " Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
@ 2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  3:00 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 09/11]vhost_virtio_user_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_virtio_user_interrupt.py | 72
> +++++++++++++++++++++-----
>  1 file changed, 59 insertions(+), 13 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_virtio_user_interrupt.py
> b/tests/TestSuite_vhost_virtio_user_interrupt.py
> index 610e56e..69d5668 100644
> --- a/tests/TestSuite_vhost_virtio_user_interrupt.py
> +++ b/tests/TestSuite_vhost_virtio_user_interrupt.py
> @@ -89,13 +89,13 @@ class TestVirtioUserInterrupt(TestCase):
>          out = self.dut.build_dpdk_apps("./examples/l3fwd-power")
>          self.verify("Error" not in out, "compilation l3fwd-power error")
> 
> -    def launch_l3fwd(self, path):
> +    def launch_l3fwd(self, path, packed=False):
>          self.core_interrupt = self.core_list_l3fwd[0]
>          example_para = "./examples/l3fwd-power/build/l3fwd-power "
> -        vdev = " --log-level='user1,7' --vdev=virtio_user0,path=%s,cq=1 -- -p 1" %
> path
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_l3fwd, prefix='l3fwd-
> pwd', no_pci=True, ports=[self.pci_info])
> +        vdev = "virtio_user0,path=%s,cq=1" % path if not packed else
> "virtio_user0,path=%s,cq=1,packed_vq=1" % path
> +        eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_l3fwd,
> + prefix='l3fwd-pwd', no_pci=True, ports=[self.pci_info], vdevs=[vdev])
>          para = " --config='(0,0,%s)' --parse-ptype" % self.core_interrupt
> -        cmd_l3fwd = example_para + eal_params + vdev + para
> +        cmd_l3fwd = example_para + eal_params + " --log-level='user1,7'
> + -- -p 1 " + para
>          self.l3fwd.get_session_before(timeout=2)
>          self.l3fwd.send_expect(cmd_l3fwd, "POWER", 40)
>          time.sleep(10)
> @@ -110,27 +110,27 @@ class TestVirtioUserInterrupt(TestCase):
>          start testpmd on vhost side
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = [r"'net_vhost0,iface=vhost-net,queues=1,client=0'"]
> +        vdev = ["net_vhost0,iface=vhost-net,queues=1,client=0"]
>          para = " -- -i --rxq=1 --txq=1"
>          if len(pci) == 0:
>              eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_vhost,
> ports=[self.pci_info], vdevs=vdev)
>          else:
> -            eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_vhost, prefix='vhost',
> no_pci=True, ports=[self.pci_info], vdevs=vdev)
> +            eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_vhost,
> + prefix='vhost', no_pci=True, vdevs=vdev)
>          cmd_vhost_user = testcmd + eal_params + para
> 
>          self.vhost.send_expect(cmd_vhost_user, "testpmd>", 30)
>          self.vhost.send_expect("set fwd mac", "testpmd>", 30)
>          self.vhost.send_expect("start", "testpmd>", 30)
> 
> -    def start_virtio_user(self):
> +    def start_virtio_user(self, packed=False):
>          """
>          start testpmd on virtio side
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = " --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-
> net"
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_l3fwd, prefix='virtio',
> no_pci=True, ports=[self.pci_info])
> +        vdev = "net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net" if
> not packed else "net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-
> net,packed_vq=1"
> +        eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_l3fwd,
> + prefix='virtio', no_pci=True, vdevs=[vdev])
>          para = " -- -i --txd=512 --rxd=128 --tx-offloads=0x00"
> -        cmd_virtio_user = testcmd + eal_params + vdev + para
> +        cmd_virtio_user = testcmd + eal_params + para
>          self.virtio.send_expect(cmd_virtio_user, "testpmd>", 120)
>          self.virtio.send_expect("set fwd mac", "testpmd>", 20)
>          self.virtio.send_expect("start", "testpmd>", 20) @@ -154,7 +154,7 @@
> class TestVirtioUserInterrupt(TestCase):
>          else:
>              self.logger.error("Wrong link status not right, status is %s" % result)
> 
> -    def test_interrupt_with_vhost_net_as_backed(self):
> +    def test_split_ring_virtio_user_interrupt_with_vhost_net_as_backed(self):
>          """
>          Check the virtio-user interrupt can work when use vhost-net as backend
>          """
> @@ -175,7 +175,7 @@ class TestVirtioUserInterrupt(TestCase):
>          self.check_interrupt_log(status="waked up")
>          self.dut.send_expect("killall -s INT ping", "#")
> 
> -    def test_interrupt_with_vhost_user_as_backed(self):
> +    def test_split_ring_virtio_user_interrupt_with_vhost_user_as_backed(self):
>          """
>          Check the virtio-user interrupt can work when use vhost-user as
> backend
>          """
> @@ -189,7 +189,7 @@ class TestVirtioUserInterrupt(TestCase):
>              time.sleep(3)
>              self.check_interrupt_log(status="waked up")
> 
> -    def test_lsc_event_between_vhost_and_virtio_user(self):
> +    def test_lsc_event_between_vhost_user_and_virtio_user_with_split_ring(self):
>          """
>          LSC event between vhost-user and virtio-user
>          """
> @@ -200,6 +200,52 @@ class TestVirtioUserInterrupt(TestCase):
>          self.vhost.send_expect("quit", "#", 20)
>          self.check_virtio_side_link_status("down")
> 
> +    def test_packed_ring_virtio_user_interrupt_with_vhost_user_as_backed(self):
> +        """
> +        Check the virtio-user interrupt can work when use vhost-user as
> backend
> +        """
> +        self.start_vhost_testpmd(pci="")
> +        self.launch_l3fwd(path="./vhost-net", packed=True)
> +        # double check the status of interrupt core
> +        for i in range(2):
> +            self.tester.scapy_append('pk=[Ether(dst="52:54:00:00:00:01")/IP()/("X"*64)]')
> +            self.tester.scapy_append('sendp(pk, iface="%s", count=100)' %
> self.tx_interface)
> +            self.tester.scapy_execute()
> +            time.sleep(3)
> +            self.check_interrupt_log(status="waked up")
> +
> +    def test_packed_ring_virtio_user_interrupt_with_vhost_net_as_backed(self):
> +        """
> +        Check the virtio-user interrupt can work when use vhost-net as
> backend
> +        """
> +        self.launch_l3fwd(path="/dev/vhost-net", packed=True)
> +        self.virtio.send_expect("ifconfig tap0 up", "#", 20)
> +        self.virtio.send_expect("ifconfig tap0 1.1.1.2", "#", 20)
> +        # start to ping, check the status of interrupt core
> +        self.virtio.send_command("ping -I tap0 1.1.1.1 > aa &", 20)
> +        time.sleep(3)
> +        self.check_interrupt_log(status="waked up")
> +        # stop ping, check the status of interrupt core
> +        self.dut.send_expect("killall -s INT ping", "#")
> +        time.sleep(2)
> +        self.check_interrupt_log(status="sleeps")
> +        # restart ping, check the status of interrupt core
> +        self.virtio.send_command("ping -I tap0 1.1.1.1 > aa &", 20)
> +        time.sleep(3)
> +        self.check_interrupt_log(status="waked up")
> +        self.dut.send_expect("killall -s INT ping", "#")
> +
> +    def test_lsc_event_between_vhost_user_and_virtio_user_with_packed_ring(self):
> +        """
> +        LSC event between vhost-user and virtio-user
> +        """
> +        self.start_vhost_testpmd(pci="--no-pci")
> +        self.start_virtio_user(packed=True)
> +        self.check_virtio_side_link_status("up")
> +
> +        self.vhost.send_expect("quit", "#", 20)
> +        self.check_virtio_side_link_status("down")
> +
>      def tear_down(self):
>          """
>          run after each test case.
> --
> 1.8.3.1
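
The recurring cleanup in this patch is that hand-built "--vdev=..." strings move into the vdevs argument of create_eal_parameters(), so the EAL section is assembled in one place. A sketch of the resulting l3fwd-power command (the eal_builder stub stands in for the DTS helper; the option layout follows the quoted diff):

    def build_l3fwd_power_cmd(path, core_interrupt, packed=False, eal_builder=None):
        vdev = "virtio_user0,path=%s,cq=1" % path
        if packed:
            vdev += ",packed_vq=1"
        # eal_builder stands in for self.dut.create_eal_parameters(..., vdevs=[vdev]).
        eal = eal_builder([vdev]) if eal_builder else "--no-pci --vdev=%s" % vdev
        return ("./examples/l3fwd-power/build/l3fwd-power " + eal +
                " --log-level='user1,7' -- -p 1 --config='(0,0,%s)' --parse-ptype" % core_interrupt)

    print(build_l3fwd_power_cmd("/dev/vhost-net", "1", packed=True))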


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 04/11]pvp_virtio_user_4k_pages: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 04/11]pvp_virtio_user_4k_pages: " Xiao Qimai
  2020-03-25  8:20   ` Wang, Yinan
@ 2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  3:00 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 04/11]pvp_virtio_user_4k_pages: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_pvp_virtio_user_4k_pages.py | 28
> ++++++++++++++++++++--------
>  1 file changed, 20 insertions(+), 8 deletions(-)
> 
> diff --git a/tests/TestSuite_pvp_virtio_user_4k_pages.py
> b/tests/TestSuite_pvp_virtio_user_4k_pages.py
> index 5e6f657..532b6c1 100644
> --- a/tests/TestSuite_pvp_virtio_user_4k_pages.py
> +++ b/tests/TestSuite_pvp_virtio_user_4k_pages.py
> @@ -130,21 +130,21 @@ class TestPvpVirtioUser4kPages(TestCase):
>          Start testpmd on vhost
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = " -m 1024 --no-huge --vdev 'net_vhost0,iface=vhost-
> net,queues=1'"
> +        vdev = 'net_vhost0,iface=vhost-net,queues=1'
>          para = " -- -i --no-numa --socket-num=%d" % self.ports_socket
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_vhost_user,
> prefix='vhost', ports=[self.pci_info])
> -        command_line_client = testcmd + eal_params + vdev + para
> +        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_vhost_user,
> prefix='vhost', ports=[self.pci_info], vdevs=[vdev])
> +        command_line_client = testcmd + eal_params + ' -m 1024
> + --no-huge' + para
>          self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
>          self.vhost_user.send_expect("start", "testpmd> ", 120)
> 
> -    def start_testpmd_as_virtio(self):
> +    def start_testpmd_as_virtio(self, packed=False):
>          """
>          Start testpmd on virtio
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = " --no-huge -m 1024 --
> vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1 --
> -i"
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio_user,
> prefix='virtio-user', ports=[self.pci_info])
> -        command_line_user = testcmd + eal_params + vdev
> +        vdev = "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-
> net,queues=1" if not packed else
> "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-
> net,packed_vq=1,queues=1"
> +        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio_user,
> prefix='virtio-user', ports=[self.pci_info], vdevs=[vdev])
> +        command_line_user = testcmd + eal_params + ' --no-huge -m 1024 -- -i'
>          self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
>          self.virtio_user.send_expect("set fwd mac", "testpmd> ", 120)
>          self.virtio_user.send_expect("start", "testpmd> ", 120) @@ -168,7
> +168,7 @@ class TestPvpVirtioUser4kPages(TestCase):
>          self.dut.close_session(self.vhost_user)
>          self.dut.close_session(self.virtio_user)
> 
> -    def test_perf_pvp_virtio_user_with_4K_pages(self):
> +    def test_perf_pvp_virtio_user_split_ring_with_4K_pages(self):
>          """
>          Basic test for virtio-user 4K pages
>          """
> @@ -179,6 +179,18 @@ class TestPvpVirtioUser4kPages(TestCase):
>          self.result_table_print()
>          self.close_all_apps()
> 
> +    def test_perf_pvp_virtio_user_packed_ring_with_4K_pages(self):
> +        """
> +        Basic test for virtio-user 4K pages
> +        """
> +        self.start_testpmd_as_vhost()
> +        self.prepare_tmpfs_for_4k()
> +        self.start_testpmd_as_virtio(packed=True)
> +        self.send_and_verify()
> +        self.result_table_print()
> +        self.close_all_apps()
> +
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
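
For the 4K-page cases both testpmd instances keep the same memory flags (--no-huge plus an explicit -m size); only the virtio-user vdev changes between split and packed ring. A short sketch of the virtio-user side (the --no-pci EAL string is a stand-in for what create_eal_parameters() produces):

    def virtio_user_vdev(packed=False):
        vdev = "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1"
        return (vdev + ",packed_vq=1") if packed else vdev

    eal = "--no-pci --vdev=%s" % virtio_user_vdev(packed=True)
    # Same --no-huge / -m 1024 flags as the vhost side, so both run on 4K pages.
    print("testpmd " + eal + " --no-huge -m 1024 -- -i")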


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 08/11]vhost_virtio_pmd_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 08/11]vhost_virtio_pmd_interrupt: " Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
@ 2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  3:00 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 08/11]vhost_virtio_pmd_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_virtio_pmd_interrupt.py | 19 ++++++++++++++++---
>  1 file changed, 16 insertions(+), 3 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_virtio_pmd_interrupt.py
> b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
> index 5e6437e..ca69932 100644
> --- a/tests/TestSuite_vhost_virtio_pmd_interrupt.py
> +++ b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
> @@ -117,7 +117,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
>          # get the core list depend on current nb_cores number
>          self.get_core_list()
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = [r"'net_vhost0,iface=%s/vhost-net,queues=%d'" %
> (self.base_dir, self.queues)]
> +        vdev = ['net_vhost0,iface=%s/vhost-net,queues=%d' %
> + (self.base_dir, self.queues)]
>          eal_params = self.dut.create_eal_parameters(cores=self.core_list,
> ports=[self.pci_info], vdevs=vdev)
>          para = " -- -i --nb-cores=%d --rxq=%d --txq=%d --rss-ip" % (self.nb_cores,
> self.queues, self.queues)
>          command_line_client = testcmd + eal_params + para
> @@ -166,7 +166,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
>              if list(self.vm.params[i].keys())[0] == 'cpu':
>                  self.vm.params[i]['cpu'][0]['number'] = self.queues
> 
> -    def start_vms(self, mode=0):
> +    def start_vms(self, mode=0, packed=False):
>          """
>          start qemus
>          """
> @@ -177,7 +177,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
>          vm_params['opt_path'] = '%s/vhost-net' % self.base_dir
>          vm_params['opt_mac'] = "00:11:22:33:44:55"
>          vm_params['opt_queue'] = self.queues
> -        opt_param = "mrg_rxbuf=on,csum=on,mq=on,vectors=%d" %
> (2*self.queues+2)
> +        opt_param = "mrg_rxbuf=on,csum=on,mq=on,vectors=%d" %
> + (2*self.queues+2) if not packed else
> + "mrg_rxbuf=on,csum=on,mq=on,vectors=%d,packed=on" %
> (2*self.queues+2)
>          if mode == 0:
>              vm_params['opt_settings'] = "disable-modern=true," + opt_param
>          elif mode == 1:
> @@ -311,6 +311,19 @@ class TestVhostVirtioPmdInterrupt(TestCase):
>          self.launch_l3fwd_power_in_vm()
>          self.send_and_verify()
> 
> +    def test_perf_packed_ring_virtio_interrupt_with_16queues(self):
> +        """
> +        wake up virtio_user 0.95 core with l3fwd-power sample
> +        """
> +        self.queues = 16
> +        self.nb_cores = 16
> +        self.start_testpmd_on_vhost()
> +        self.start_vms(mode=0, packed=True)
> +        self.prepare_vm_env()
> +        self.launch_l3fwd_power_in_vm()
> +        self.send_and_verify()
> +
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
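
Here the new packed flag is combined with the existing modern/legacy mode switch, so one expression decides the whole opt_settings string. A sketch of that combination (mode semantics as in start_vms(): 0 selects virtio 0.95, 1 selects virtio 1.0; other modes are omitted):

    def opt_settings(queues, mode, packed=False):
        opt = "mrg_rxbuf=on,csum=on,mq=on,vectors=%d" % (2 * queues + 2)
        if packed:
            opt += ",packed=on"
        prefix = "disable-modern=true," if mode == 0 else "disable-modern=false,"
        return prefix + opt

    print(opt_settings(16, mode=0, packed=True))
    # disable-modern=true,mrg_rxbuf=on,csum=on,mq=on,vectors=34,packed=on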


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dts] [PATCH V1 05/11]vhost_enqueue_interrupt: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 05/11]vhost_enqueue_interrupt: " Xiao Qimai
  2020-03-25  8:22   ` Wang, Yinan
@ 2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  3:00 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 05/11]vhost_enqueue_interrupt: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_vhost_enqueue_interrupt.py | 36
> +++++++++++++++++++++++-------
>  1 file changed, 28 insertions(+), 8 deletions(-)
> 
> diff --git a/tests/TestSuite_vhost_enqueue_interrupt.py
> b/tests/TestSuite_vhost_enqueue_interrupt.py
> index 509c7f3..9d0e024 100644
> --- a/tests/TestSuite_vhost_enqueue_interrupt.py
> +++ b/tests/TestSuite_vhost_enqueue_interrupt.py
> @@ -83,14 +83,14 @@ class TestVhostEnqueueInterrupt(TestCase):
>          self.core_list_virtio = core_list[0: self.queues+1]
>          self.core_list_l3fwd = core_list[self.queues+1: need_num]
> 
> -    def lanuch_virtio_user(self):
> +    def lanuch_virtio_user(self, packed=False):
>          """
>          launch virtio-user with server mode
>          """
> -        vdev = "--vdev=net_virtio_user0,mac=%s,path=./vhost-
> net,server=1,queues=%d" % (self.vmac, self.queues)
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio, prefix='virtio',
> no_pci=True, ports=[self.pci_info])
> +        vdev = "net_virtio_user0,mac=%s,path=./vhost-
> net,server=1,queues=%d" % (self.vmac, self.queues) if not packed else
> "net_virtio_user0,mac=%s,path=./vhost-
> net,server=1,queues=%d,packed_vq=1" % (self.vmac, self.queues)
> +        eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_virtio,
> + prefix='virtio', no_pci=True, ports=[self.pci_info], vdevs=[vdev])
>          para = " -- -i --rxq=%d --txq=%d --rss-ip" % (self.queues, self.queues)
> -        command_line_client =  self.dut.target + "/app/testpmd " + eal_params
> + vdev + para
> +        command_line_client =  self.dut.target + "/app/testpmd " +
> + eal_params + para
>          self.virtio_user.send_expect(command_line_client, "testpmd> ", 120)
>          self.virtio_user.send_expect("set fwd txonly", "testpmd> ", 20)
> 
> @@ -109,9 +109,9 @@ class TestVhostEnqueueInterrupt(TestCase):
>              self.verify_info.append(info)
> 
>          example_cmd = "./examples/l3fwd-power/build/l3fwd-power "
> -        vdev = [r"'net_vhost0,iface=vhost-net,queues=%d,client=1'" %
> self.queues]
> +        vdev = 'net_vhost0,iface=vhost-net,queues=%d,client=1' %
> + self.queues
>          para = " -- -p 0x1 --parse-ptype 1 --config '%s' " % config_info
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_l3fwd, no_pci=True,
> ports=[self.pci_info], vdevs=vdev)
> +        eal_params =
> + self.dut.create_eal_parameters(cores=self.core_list_l3fwd,
> + no_pci=True, ports=[self.pci_info], vdevs=[vdev])
>          command_line_client = example_cmd + eal_params + para
>          self.vhost.get_session_before(timeout=2)
>          self.vhost.send_expect(command_line_client, "POWER", 40) @@ -156,7
> +156,7 @@ class TestVhostEnqueueInterrupt(TestCase):
>          self.dut.close_session(self.vhost)
>          self.dut.close_session(self.virtio_user)
> 
> -    def test_virtio_user_interrupt(self):
> +    def
> test_wake_up_split_ring_vhost_user_core_with_l3fwd_power_sample(self
> ):
>          """
>          Check the virtio-user interrupt can work when use vhost-net as backend
>          """
> @@ -166,7 +166,7 @@ class TestVhostEnqueueInterrupt(TestCase):
>          self.lanuch_l3fwd_power()
>          self.send_and_verify()
> 
> -    def test_virtio_user_interrupt_with_multi_queue(self):
> +    def test_wake_up_split_ring_vhost_user_core_with_l3fwd_power_sample_when_multi_queues_enabled(self):
>          """
>          Check the virtio-user interrupt can work with multi queue
>          """
> @@ -176,6 +176,26 @@ class TestVhostEnqueueInterrupt(TestCase):
>          self.lanuch_l3fwd_power()
>          self.send_and_verify()
> 
> +    def test_wake_up_packed_ring_vhost_user_core_with_l3fwd_power_sample(self):
> +        """
> +        Check the virtio-user interrupt can work when use vhost-net as backend
> +        """
> +        self.queues = 1
> +        self.get_core_list()
> +        self.lanuch_virtio_user(packed=True)
> +        self.lanuch_l3fwd_power()
> +        self.send_and_verify()
> +
> +    def test_wake_up_packed_ring_vhost_user_core_with_l3fwd_power_sample_when_multi_queues_enabled(self):
> +        """
> +        Check the virtio-user interrupt can work with multi queue
> +        """
> +        self.queues = 4
> +        self.get_core_list()
> +        self.lanuch_virtio_user(packed=True)
> +        self.lanuch_l3fwd_power()
> +        self.send_and_verify()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
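
For readers skimming the diff: the new packed-ring cases differ from the split-ring ones only in the virtio-user vdev string handed to create_eal_parameters(vdevs=[...]). A minimal standalone sketch; the helper name is illustrative, while the option values mirror the quoted hunk.

    def build_virtio_user_vdev(mac, queues, packed=False):
        # server-mode virtio-user device, later passed via create_eal_parameters(vdevs=[...])
        vdev = "net_virtio_user0,mac=%s,path=./vhost-net,server=1,queues=%d" % (mac, queues)
        if packed:
            vdev += ",packed_vq=1"   # same path, switched to the packed ring layout
        return vdev

    print(build_virtio_user_vdev("00:11:22:33:44:10", 4, packed=True))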



* Re: [dts] [PATCH V1 03/11]pvp_virtio_user_2M_hugepages: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 03/11]pvp_virtio_user_2M_hugepages: " Xiao Qimai
  2020-03-25  8:23   ` Wang, Yinan
@ 2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  3:00 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 03/11]pvp_virtio_user_2M_hugepages: update
> script according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_pvp_virtio_user_2M_hugepages.py | 22
> ++++++++++++++++------
>  1 file changed, 16 insertions(+), 6 deletions(-)
> 
> diff --git a/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
> b/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
> index 9d2eaed..ac7187c 100644
> --- a/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
> +++ b/tests/TestSuite_pvp_virtio_user_2M_hugepages.py
> @@ -128,20 +128,20 @@ class TestPVPVirtioWith2Mhuge(TestCase):
>          start testpmd on vhost
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = [r"'net_vhost0,iface=vhost-net,queues=1'"]
> +        vdev = ["net_vhost0,iface=vhost-net,queues=1"]
>          eal_params = self.dut.create_eal_parameters(cores=self.core_list_vhost_user, prefix='vhost', ports=[self.pci_info], vdevs=vdev)
>          command_line_client = testcmd + eal_params + " -- -i"
>          self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
>          self.vhost_user.send_expect("start", "testpmd> ", 120)
> 
> -    def start_testpmd_as_virtio(self):
> +    def start_testpmd_as_virtio(self, packed=False):
>          """
>          start testpmd on virtio
>          """
>          testcmd = self.dut.target + "/app/testpmd "
> -        vdev = " --single-file-segments --
> vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1 --
> -i"
> -        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio_user,
> no_pci=True, prefix='virtio-user', ports=[self.pci_info])
> -        command_line_user = testcmd + eal_params + vdev
> +        vdev = 'net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-
> net,queues=1' if not packed else
> 'net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-
> net,queues=1,packed_vq=1'
> +        eal_params =
> self.dut.create_eal_parameters(cores=self.core_list_virtio_user,
> no_pci=True, prefix='virtio-user', ports=[self.pci_info], vdevs=[vdev])
> +        command_line_user = testcmd + eal_params + ' --single-file-segments -
> - -i'
>          self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
>          self.virtio_user.send_expect("start", "testpmd> ", 120)
> 
> @@ -154,7 +154,7 @@ class TestPVPVirtioWith2Mhuge(TestCase):
>          self.dut.close_session(self.vhost_user)
>          self.dut.close_session(self.virtio_user)
> 
> -    def test_perf_pvp_virtio_user_with_2M_hugepages(self):
> +    def test_perf_pvp_virtio_user_split_ring_2M_hugepages(self):
>          """
>          Basic test for virtio-user 2M hugepage
>          """
> @@ -164,6 +164,16 @@ class TestPVPVirtioWith2Mhuge(TestCase):
>          self.result_table_print()
>          self.close_all_apps()
> 
> +    def test_perf_pvp_virtio_user_packed_ring_2M_hugepages(self):
> +        """
> +        Basic test for virtio-user 2M hugepage
> +        """
> +        self.start_testpmd_as_vhost()
> +        self.start_testpmd_as_virtio(packed=True)
> +        self.send_and_verify()
> +        self.result_table_print()
> +        self.close_all_apps()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
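
The functional change above is mostly command-line plumbing: the vdev moves into create_eal_parameters(vdevs=[...]) and --single-file-segments stays an EAL flag in front of the "--" separator. A hedged sketch of the resulting command line; the example EAL string is made up for illustration, only the flag placement mirrors the hunk.

    def virtio_user_cmdline(target, eal_params, single_file_segments=True):
        # everything before "--" goes to the EAL, everything after to testpmd itself
        eal_extra = " --single-file-segments" if single_file_segments else ""
        return target + "/app/testpmd " + eal_params + eal_extra + " -- -i"

    # illustrative EAL string; in the suite it comes from dut.create_eal_parameters(..., vdevs=[vdev])
    eal = "-l 1,2 -n 4 --no-pci --vdev 'net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1,packed_vq=1'"
    print(virtio_user_cmdline("x86_64-native-linuxapp-gcc", eal))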



* Re: [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (10 preceding siblings ...)
  2020-03-25  8:22 ` [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: " Wang, Yinan
@ 2020-03-31  3:00 ` Xiao, QimaiX
  2020-03-31  3:06 ` Tu, Lijuan
  12 siblings, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  3:00 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 01/11]loopback_multi_paths_port_restart: update
> script according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  .../TestSuite_loopback_multi_paths_port_restart.py | 85
> +++++++++++++++-------
>  1 file changed, 57 insertions(+), 28 deletions(-)
> 
> diff --git a/tests/TestSuite_loopback_multi_paths_port_restart.py
> b/tests/TestSuite_loopback_multi_paths_port_restart.py
> index 0b54b8d..5f1faad 100644
> --- a/tests/TestSuite_loopback_multi_paths_port_restart.py
> +++ b/tests/TestSuite_loopback_multi_paths_port_restart.py
> @@ -121,15 +121,21 @@ class TestLoopbackPortRestart(TestCase):
>                  break
>              time.sleep(3)
>              loop = loop + 1
> -
>          self.verify("down" not in port_status, "port can not up after restart")
> 
> -    def port_restart(self):
> -        self.vhost.send_expect("stop", "testpmd> ", 120)
> -        self.vhost.send_expect("port stop 0", "testpmd> ", 120)
> -        self.check_port_throughput_after_port_stop()
> -        self.vhost.send_expect("clear port stats all", "testpmd> ", 120)
> -        self.vhost.send_expect("port start all", "testpmd> ", 120)
> +    def port_restart(self, restart_times=1):
> +        if restart_times == 1:
> +            self.vhost.send_expect("stop", "testpmd> ", 120)
> +            self.vhost.send_expect("port stop 0", "testpmd> ", 120)
> +            self.check_port_throughput_after_port_stop()
> +            self.vhost.send_expect("clear port stats all", "testpmd> ", 120)
> +            self.vhost.send_expect("port start all", "testpmd> ", 120)
> +        else:
> +            for i in range(restart_times):
> +                self.vhost.send_expect("stop", "testpmd> ", 120)
> +                self.vhost.send_expect("port stop 0", "testpmd> ", 120)
> +                self.vhost.send_expect("clear port stats all", "testpmd> ", 120)
> +                self.vhost.send_expect("port start all", "testpmd> ",
> + 120)
>          self.check_port_link_status_after_port_restart()
>          self.vhost.send_expect("set burst 1", "testpmd> ", 120)
>          self.vhost.send_expect("start tx_first 1", "testpmd> ", 120) @@ -156,7
> +162,7 @@ class TestLoopbackPortRestart(TestCase):
>          self.verify(Mpps > 0, "%s can not receive packets" % self.running_case)
>          return Mpps
> 
> -    def send_and_verify(self, case_info, frame_size):
> +    def send_and_verify(self, case_info, frame_size, restart_times=1):
>          """
>          start to send packets and calculate the average throughput
>          """
> @@ -166,7 +172,7 @@ class TestLoopbackPortRestart(TestCase):
>          Mpps = self.calculate_avg_throughput()
>          self.update_table_info(case_info, frame_size, Mpps, "Before Restart")
> 
> -        self.port_restart()
> +        self.port_restart(restart_times)
>          Mpps = self.calculate_avg_throughput()
>          self.update_table_info(case_info, frame_size, Mpps, "After Restart and set burst to 1")
> 
> @@ -184,7 +190,7 @@ class TestLoopbackPortRestart(TestCase):
>          self.dut.close_session(self.vhost)
>          self.dut.close_session(self.virtio_user)
> 
> -    def test_vhost_loopback_virtio11_mergeable_mac(self):
> +    def test_loopback_test_with_packed_ring_mergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port on virtio1.1 mergeable
> path
>          """
> @@ -193,11 +199,11 @@ class TestLoopbackPortRestart(TestCase):
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtio1.1 mergeable", frame_size)
> +            self.send_and_verify("packed ring mergeable", frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtio11_normal_mac(self):
> +    def test_loopback_test_with_packed_ring_nonmergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port ob virtio1.1 normal path
>          """
> @@ -206,63 +212,86 @@ class TestLoopbackPortRestart(TestCase):
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtio1.1 normal", frame_size)
> +            self.send_and_verify("packed ring non-mergeable", frame_size)
> +            self.close_all_testpmd()
> +        self.result_table_print()
> +
> +    def test_lookback_test_with_packed_ring_inorder_mergeable_path(self):
> +        pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=1,in_order=1",
> +                   "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +        for frame_size in self.frame_sizes:
> +            self.start_vhost_testpmd()
> +            self.start_virtio_user_testpmd(pmd_arg)
> +            self.send_and_verify("packed ring non-mergeable", frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_inorder_mergeable_mac(self):
> +    def test_lookback_test_with_packed_ring_inorder_nonmergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port on inorder mergeable
> path
>          """
> -        pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1 ",
> -                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
> +        pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=0,in_order=1",
> +                          "path": "--tx-offloads=0x0
> + --enable-hw-vlan-strip"}
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("inorder mergeable", frame_size)
> +            self.send_and_verify("packed ring inorder non-mergeable",
> + frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_inorder_mergeable_off_mac(self):
> +    def test_lookback_test_with_split_ring_inorder_mergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port on inorder normal path
>          """
> +        pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
> +                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
> +        for frame_size in self.frame_sizes:
> +            self.start_vhost_testpmd()
> +            self.start_virtio_user_testpmd(pmd_arg)
> +            self.send_and_verify("split ring inorder mergeable", frame_size)
> +            self.close_all_testpmd()
> +        self.result_table_print()
> +
> +    def test_lookback_test_with_split_ring_inorder_nonmergeable_path(self):
> +        """
> +        performance for [frame_sizes] and restart port on virtio normal path
> +        """
>          pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0 ",
>                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("inorder normal", frame_size)
> +            self.send_and_verify("split ring inorder non-mergeable",
> + frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_mergeable_mac(self):
> +    def test_lookback_test_with_split_ring_mergeable_path(self):
>          """
> -        performance for [frame_sizes] and restart port on virtio mergeable path
> +        performance for [frame_sizes] and restart port on virtio normal path
>          """
> -        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1 ",
> +        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
>                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtiouser mergeable", frame_size)
> +            self.send_and_verify("split ring mergeable", frame_size,
> + restart_times=100)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_normal_mac(self):
> +    def test_lookback_test_with_split_ring_nonmergeable_path(self):
>          """
>          performance for [frame_sizes] and restart port on virtio normal path
>          """
> -        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0 ",
> +        pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
>                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip "}
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtiouser normal", frame_size)
> +            self.send_and_verify("split ring non-mergeable",
> + frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> -    def test_vhost_loopback_virtiouser_vector_rx_mac(self):
> +    def test_loopback_test_with_split_ring_vector_rx_path(self):
>          """
>          performance for frame_sizes and restart port on virtio vector rx
>          """
> @@ -271,7 +300,7 @@ class TestLoopbackPortRestart(TestCase):
>          for frame_size in self.frame_sizes:
>              self.start_vhost_testpmd()
>              self.start_virtio_user_testpmd(pmd_arg)
> -            self.send_and_verify("virtiouser vector_rx", frame_size)
> +            self.send_and_verify("split ring vector_rx", frame_size)
>              self.close_all_testpmd()
>          self.result_table_print()
> 
> --
> 1.8.3.1
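
The core of this patch is the new restart_times knob: only the single-restart path keeps the throughput check after "port stop 0", and the split-ring mergeable case now loops 100 restarts. A condensed sketch of that loop; session and check_throughput stand in for the suite's self.vhost session and helper, so this is not the literal implementation.

    def port_restart(session, restart_times=1, check_throughput=None):
        for _ in range(restart_times):
            session.send_expect("stop", "testpmd> ", 120)
            session.send_expect("port stop 0", "testpmd> ", 120)
            if restart_times == 1 and check_throughput is not None:
                check_throughput()   # only verified on the single-restart path
            session.send_expect("clear port stats all", "testpmd> ", 120)
            session.send_expect("port start all", "testpmd> ", 120)
        # link-status check omitted for brevity
        session.send_expect("set burst 1", "testpmd> ", 120)
        session.send_expect("start tx_first 1", "testpmd> ", 120)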



* Re: [dts] [PATCH V1 02/11]loopback_multi_queues: update script according to testplan's update
  2020-03-25  8:10 ` [dts] [PATCH V1 02/11]loopback_multi_queues: " Xiao Qimai
  2020-03-25  8:23   ` Wang, Yinan
@ 2020-03-31  3:00   ` Xiao, QimaiX
  1 sibling, 0 replies; 34+ messages in thread
From: Xiao, QimaiX @ 2020-03-31  3:00 UTC (permalink / raw)
  To: dts

Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>

Regards,
Xiao Qimai


> -----Original Message-----
> From: Xiao, QimaiX
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts][PATCH V1 02/11]loopback_multi_queues: update script
> according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  tests/TestSuite_loopback_multi_queues.py | 55
> +++++++++++++++++++++++++++-----
>  1 file changed, 47 insertions(+), 8 deletions(-)
> 
> diff --git a/tests/TestSuite_loopback_multi_queues.py
> b/tests/TestSuite_loopback_multi_queues.py
> index 589bf06..174ce01 100644
> --- a/tests/TestSuite_loopback_multi_queues.py
> +++ b/tests/TestSuite_loopback_multi_queues.py
> @@ -98,7 +98,7 @@ class TestLoopbackMultiQueues(TestCase):
>          start testpmd on virtio
>          """
>          eal_param = self.dut.create_eal_parameters(cores=self.core_list_user, prefix='virtio', no_pci=True, vdevs=['net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,queues=%d,%s' % (self.queue_number, args["version"])])
> -        command_line_user = self.dut.target + "/app/testpmd " + eal_param + " -- -i %s --rss-ip --nb-cores=%d --rxq=%d --txq=%d --txd=1024 --rxd=1024" % (args["path"], self.nb_cores, self.queue_number, self.queue_number)
> +        command_line_user = self.dut.target + "/app/testpmd " + eal_param + " -- -i %s --nb-cores=%d --rxq=%d --txq=%d --txd=1024 --rxd=1024" % (args["path"], self.nb_cores, self.queue_number, self.queue_number)
>          self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
>          self.virtio_user.send_expect("set fwd mac", "testpmd> ", 120)
>          self.virtio_user.send_expect("start", "testpmd> ", 120) @@ -162,6
> +162,7 @@ class TestLoopbackMultiQueues(TestCase):
>              if self.queue_number > 1:
>                  self.check_packets_of_each_queue(frame_size)
> 
> +
>      def verify_liner_for_multi_queue(self):
>          """
>          verify the Mpps of 8 queues is eight times of 1 queue
> @@ -191,7 +192,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP virtio 1.1 Mergeable Path.
>          """
>          virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -209,7 +210,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP virtio1.1 Normal Path.
>          """
>          virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -227,7 +228,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP In_order mergeable Path.
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -245,7 +246,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP In_order no_mergeable Path.
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0",
> -                        "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                        "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -263,12 +264,14 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP Mergeable Path.
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
>              self.get_core_mask()
>              self.start_vhost_testpmd()
> +            if self.queue_number == 8:
> +                virtio_pmd_arg["path"] = "--enable-hw-vlan-strip"
>              self.start_virtio_testpmd(virtio_pmd_arg)
>              self.send_and_verify("virito mergeable")
>              self.close_all_testpmd()
> @@ -281,7 +284,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP Normal Path.
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
> -                            "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> +                            "path": "--enable-hw-vlan-strip"}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -299,7 +302,7 @@ class TestLoopbackMultiQueues(TestCase):
>          performance for Vhost PVP Vector_RX Path
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
> -                            "path": "--tx-offloads=0x0"}
> +                            "path": ""}
>          for i in self.verify_queue:
>              self.nb_cores = i
>              self.queue_number = i
> @@ -312,6 +315,42 @@ class TestLoopbackMultiQueues(TestCase):
>          self.result_table_print()
>          self.verify_liner_for_multi_queue()
> 
> +    def test_loopback_with_virtio11_inorder_mergeable_path_multi_queue(self):
> +        """
> +        performance for Vhost PVP Vector_RX Path
> +        """
> +        virtio_pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=1,in_order=1",
> +                            "path": ""}
> +        for i in self.verify_queue:
> +            self.nb_cores = i
> +            self.queue_number = i
> +            self.get_core_mask()
> +            self.start_vhost_testpmd()
> +            self.start_virtio_testpmd(virtio_pmd_arg)
> +            self.send_and_verify("virtio 1.1 inorder mergeable")
> +            self.close_all_testpmd()
> +
> +        self.result_table_print()
> +        self.verify_liner_for_multi_queue()
> +
> +    def test_loopback_with_virtio11_inorder_nonmergeable_path_multi_queue(self):
> +        """
> +        performance for Vhost PVP Vector_RX Path
> +        """
> +        virtio_pmd_arg = {"version": "packed_vq=1,mrg_rxbuf=0,in_order=1",
> +                            "path": ""}
> +        for i in self.verify_queue:
> +            self.nb_cores = i
> +            self.queue_number = i
> +            self.get_core_mask()
> +            self.start_vhost_testpmd()
> +            self.start_virtio_testpmd(virtio_pmd_arg)
> +            self.send_and_verify("virtio 1.1 inorder non-mergeable")
> +            self.close_all_testpmd()
> +
> +        self.result_table_print()
> +        self.verify_liner_for_multi_queue()
> +
>      def tear_down(self):
>          """
>          Run after each test case.
> --
> 1.8.3.1
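
The multi-queue cases above all funnel into the linearity check named in the context lines ("verify the Mpps of 8 queues is eight times of 1 queue"). A self-contained sketch of that kind of check; the dict shape, the sample numbers and the 0.8 tolerance are assumptions for illustration, not the suite's actual values.

    def verify_linear_scaling(mpps_by_queue, tolerance=0.8):
        # mpps_by_queue maps queue count -> measured Mpps, e.g. {1: 7.4, 8: 55.0}
        base = mpps_by_queue[1]
        for queues, mpps in sorted(mpps_by_queue.items()):
            expected = base * queues
            assert mpps >= expected * tolerance, (
                "%d queues: %.2f Mpps is below %.0f%% of the expected %.2f Mpps"
                % (queues, mpps, tolerance * 100, expected))

    verify_linear_scaling({1: 7.4, 8: 55.0})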



* Re: [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update
  2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
                   ` (11 preceding siblings ...)
  2020-03-31  3:00 ` Xiao, QimaiX
@ 2020-03-31  3:06 ` Tu, Lijuan
  12 siblings, 0 replies; 34+ messages in thread
From: Tu, Lijuan @ 2020-03-31  3:06 UTC (permalink / raw)
  To: Xiao, QimaiX, dts; +Cc: Xiao, QimaiX

Applied the series, thanks

> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of Xiao Qimai
> Sent: Wednesday, March 25, 2020 4:11 PM
> To: dts@dpdk.org
> Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> Subject: [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update
> script according to testplan's update
> 
> Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> ---
>  .../TestSuite_loopback_multi_paths_port_restart.py | 85 +++++++++++++++-------
>  1 file changed, 57 insertions(+), 28 deletions(-)
> 
> [diff body trimmed; identical to the patch quoted in the Tested-by reply above]



Thread overview: 34+ messages
2020-03-25  8:10 [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: update script according to testplan's update Xiao Qimai
2020-03-25  8:10 ` [dts] [PATCH V1 02/11]loopback_multi_queues: " Xiao Qimai
2020-03-25  8:23   ` Wang, Yinan
2020-03-31  3:00   ` Xiao, QimaiX
2020-03-25  8:10 ` [dts] [PATCH V1 03/11]pvp_virtio_user_2M_hugepages: " Xiao Qimai
2020-03-25  8:23   ` Wang, Yinan
2020-03-31  3:00   ` Xiao, QimaiX
2020-03-25  8:10 ` [dts] [PATCH V1 04/11]pvp_virtio_user_4k_pages: " Xiao Qimai
2020-03-25  8:20   ` Wang, Yinan
2020-03-31  3:00   ` Xiao, QimaiX
2020-03-25  8:10 ` [dts] [PATCH V1 05/11]vhost_enqueue_interrupt: " Xiao Qimai
2020-03-25  8:22   ` Wang, Yinan
2020-03-31  3:00   ` Xiao, QimaiX
2020-03-25  8:10 ` [dts] [PATCH V1 06/11]vhost_event_idx_interrupt: " Xiao Qimai
2020-03-25  8:21   ` Wang, Yinan
2020-03-31  2:59   ` Xiao, QimaiX
2020-03-25  8:10 ` [dts] [PATCH V1 07/11]vhost_user_live_migration: " Xiao Qimai
2020-03-25  8:22   ` Wang, Yinan
2020-03-31  2:59   ` Xiao, QimaiX
2020-03-25  8:10 ` [dts] [PATCH V1 08/11]vhost_virtio_pmd_interrupt: " Xiao Qimai
2020-03-25  8:22   ` Wang, Yinan
2020-03-31  3:00   ` Xiao, QimaiX
2020-03-25  8:10 ` [dts] [PATCH V1 09/11]vhost_virtio_user_interrupt: " Xiao Qimai
2020-03-25  8:22   ` Wang, Yinan
2020-03-31  3:00   ` Xiao, QimaiX
2020-03-25  8:10 ` [dts] [PATCH V1 10/11]virtio_event_idx_interrupt: " Xiao Qimai
2020-03-25  8:22   ` Wang, Yinan
2020-03-31  2:59   ` Xiao, QimaiX
2020-03-25  8:10 ` [dts] [PATCH V1 11/11]virtio_pvp_regression: " Xiao Qimai
2020-03-25  8:22   ` Wang, Yinan
2020-03-31  2:59   ` Xiao, QimaiX
2020-03-25  8:22 ` [dts] [PATCH V1 01/11]loopback_multi_paths_port_restart: " Wang, Yinan
2020-03-31  3:00 ` Xiao, QimaiX
2020-03-31  3:06 ` Tu, Lijuan
