From: Yinan <yinan.wang@intel.com>
To: dts@dpdk.org
Cc: Wang Yinan <yinan.wang@intel.com>
Subject: [dts] [PATCH v1] tests/pvp_vhost_user_reconnect: add packed ring reconnect test cases
Date: Thu, 13 Feb 2020 00:23:33 +0000
Message-ID: <20200213002333.103356-1-yinan.wang@intel.com>

From: Wang Yinan <yinan.wang@intel.com>
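
Add packed ring reconnect test cases mirroring the existing split ring
cases: reconnect with one VM, reconnect with two VMs, and vm2vm
virtio-net reconnect verified with iperf. The new cases call
start_vms(packed=True), which appends packed=on to the virtio device
settings, so each VM device is launched with the value below (shown as
the resulting setting_args string built in this patch):

    setting_args = "mrg_rxbuf=on,rx_queue_size=1024,tx_queue_size=1024,packed=on"

The packed ring cases assume a QEMU new enough to support the packed=on
virtio device property (QEMU 4.2.0 or later).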

Signed-off-by: Wang Yinan <yinan.wang@intel.com>
---
 tests/TestSuite_pvp_vhost_user_reconnect.py | 112 +++++++++++++++++++-
 1 file changed, 107 insertions(+), 5 deletions(-)

diff --git a/tests/TestSuite_pvp_vhost_user_reconnect.py b/tests/TestSuite_pvp_vhost_user_reconnect.py
index 06d081f..7b201a0 100644
--- a/tests/TestSuite_pvp_vhost_user_reconnect.py
+++ b/tests/TestSuite_pvp_vhost_user_reconnect.py
@@ -170,12 +170,15 @@ class TestPVPVhostUserReconnect(TestCase):
                     'in this suite, please config it in vhost_sample.cfg file')
         self.checked_vm = True
 
-    def start_vms(self):
+    def start_vms(self, packed=False):
         """
         start two VM
         """
         self.vm_dut = []
         self.vm = []
+        setting_args = "mrg_rxbuf=on,rx_queue_size=1024,tx_queue_size=1024"
+        if packed is True:
+            setting_args = "%s,packed=on" % setting_args
         for i in range(self.vm_num):
             vm_info = VM(self.dut, 'vm%d' % i, 'vhost_sample')
             vm_params = {}
@@ -183,7 +186,7 @@ class TestPVPVhostUserReconnect(TestCase):
             vm_params['opt_path'] = './vhost-net%d' % (i)
             vm_params['opt_mac'] = '52:54:00:00:00:0%d' % (i+1)
             vm_params['opt_server'] = 'server'
-            vm_params['opt_settings'] = 'mrg_rxbuf=on,rx_queue_size=1024,tx_queue_size=1024'
+            vm_params['opt_settings'] = setting_args
             vm_info.set_vm_device(**vm_params)
             self.check_qemu_version(vm_info)
 
@@ -295,7 +298,7 @@ class TestPVPVhostUserReconnect(TestCase):
                             "After relaunch", "1"]
             self.result_table_add(data_row)
 
-    def test_perf_vhost_user_reconnet_one_vm(self):
+    def test_perf_split_ring_reconnet_one_vm(self):
         """
         test reconnect stability test of one vm
         """
@@ -327,7 +330,7 @@ class TestPVPVhostUserReconnect(TestCase):
         self.result_table_print()
         self.stop_all_apps()
 
-    def test_perf_vhost_user_reconnet_two_vms(self):
+    def test_perf_split_ring_reconnet_two_vms(self):
         """
         test reconnect stability test of two vms
         """
@@ -359,7 +362,7 @@ class TestPVPVhostUserReconnect(TestCase):
         self.result_table_print()
         self.stop_all_apps()
 
-    def test_perf_vhost_vm2vm_virtio_net_reconnet_two_vms(self):
+    def test_perf_split_ring_vm2vm_virtio_net_reconnet_two_vms(self):
         """
         test the iperf traffice can resume after reconnet
         """
@@ -394,6 +397,105 @@ class TestPVPVhostUserReconnect(TestCase):
             self.iperf_result_verify(vm_cycle, 'reconnet from vm')
         self.result_table_print()
 
+    def test_perf_packed_ring_reconnet_one_vm(self):
+        """
+        packed ring reconnect stability test with one VM
+        """
+        self.header_row = ["Mode", "FrameSize(B)", "Throughput(Mpps)",
+                            "LineRate(%)", "Cycle", "Queue Number"]
+        self.result_table_create(self.header_row)
+        vm_cycle = 0
+        self.vm_num = 1
+        self.launch_testpmd_as_vhost_user()
+        self.start_vms(packed=True)
+        self.vm_testpmd_start()
+        self.send_and_verify(vm_cycle, "reconnet one vm")
+
+        vm_cycle = 1
+        # reconnect from vhost
+        self.logger.info('now reconnect from vhost')
+        for i in range(self.reconnect_times):
+            self.dut.send_expect("killall -s INT testpmd", "# ")
+            self.launch_testpmd_as_vhost_user()
+            self.send_and_verify(vm_cycle, "reconnet from vhost")
+
+        # reconnect from qemu
+        self.logger.info('now reconnect from vm')
+        for i in range(self.reconnect_times):
+            self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ")
+            self.start_vms(packed=True)
+            self.vm_testpmd_start()
+            self.send_and_verify(vm_cycle, "reconnet from VM")
+        self.result_table_print()
+        self.stop_all_apps()
+
+    def test_perf_packed_ring_reconnet_two_vms(self):
+        """
+        packed ring reconnect stability test with two VMs
+        """
+        self.header_row = ["Mode", "FrameSize(B)", "Throughput(Mpps)",
+                            "LineRate(%)", "Cycle", "Queue Number"]
+        self.result_table_create(self.header_row)
+        vm_cycle = 0
+        self.vm_num = 2
+        self.launch_testpmd_as_vhost_user()
+        self.start_vms(packed=True)
+        self.vm_testpmd_start()
+        self.send_and_verify(vm_cycle, "reconnet two vm")
+
+        vm_cycle = 1
+        # reconnect from vhost
+        self.logger.info('now reconnect from vhost')
+        for i in range(self.reconnect_times):
+            self.dut.send_expect("killall -s INT testpmd", "# ")
+            self.launch_testpmd_as_vhost_user()
+            self.send_and_verify(vm_cycle, "reconnet from vhost")
+
+        # reconnect from qemu
+        self.logger.info('now reconnect from vm')
+        for i in range(self.reconnect_times):
+            self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ")
+            self.start_vms(packed=True)
+            self.vm_testpmd_start()
+            self.send_and_verify(vm_cycle, "reconnet from VM")
+        self.result_table_print()
+        self.stop_all_apps()
+
+    def test_perf_packed_ring_virtio_net_reconnet_two_vms(self):
+        """
+        check that iperf traffic can resume after reconnect with packed ring
+        """
+        self.header_row = ["Mode", "[M|G]bits/sec", "Cycle"]
+        self.result_table_create(self.header_row)
+        self.vm_num = 2
+        vm_cycle = 0
+        self.launch_testpmd_as_vhost_user_with_no_pci()
+        self.start_vms(packed=True)
+        self.config_vm_intf()
+        self.start_iperf()
+        self.iperf_result_verify(vm_cycle, 'before reconnet')
+
+        vm_cycle = 1
+        # reconnect from vhost
+        self.logger.info('now reconnect from vhost')
+        for i in range(self.reconnect_times):
+            self.dut.send_expect("killall -s INT testpmd", "# ")
+            self.launch_testpmd_as_vhost_user_with_no_pci()
+            self.start_iperf()
+            self.iperf_result_verify(vm_cycle, 'reconnet from vhost')
+
+        # reconnect from VM
+        self.logger.info('now reconnect from vm')
+        for i in range(self.reconnect_times):
+            self.vm_dut[0].send_expect('rm iperf_server.log', '# ', 10)
+            self.vm_dut[1].send_expect('rm iperf_client.log', '# ', 10)
+            self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ")
+            self.start_vms(packed=True)
+            self.config_vm_intf()
+            self.start_iperf()
+            self.iperf_result_verify(vm_cycle, 'reconnet from vm')
+        self.result_table_print()
+
     def tear_down(self):
         #
         # Run after each test case.
-- 
2.17.1

