From: Ling Wei <weix.ling@intel.com>
To: dts@dpdk.org
Cc: Ling Wei <weix.ling@intel.com>
Subject: [dts] [PATCH V1] tests/vm2vm_virtio_net_perf: Add and modify test cases to sync with test plan
Date: Fri,  2 Apr 2021 14:16:15 +0800
Message-ID: <20210402061615.66125-1-weix.ling@intel.com>

1. Modify test case 8 to sync with the test plan.
2. Add test case 12 to sync with the test plan.
3. Add a method to bind DUT ports to the DPDK-compatible driver and a
   step in tear_down_all to restore the testbed.
4. Adjust code format.

Signed-off-by: Ling Wei <weix.ling@intel.com>
---
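Note (illustrative only, not part of the applied change): each test case now
sets self.vm_args with the virtio device settings before calling
prepare_test_env(), replacing the old path_mode lookup in start_vms(), and the
vhost queue count is passed through the new rxq_txq parameter instead of
used_queues. A minimal sketch of the new call pattern, using the values from
test case 5 below:

    # Per-case virtio device settings now live in self.vm_args.
    self.vm_args = ("disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,"
                    "csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,"
                    "guest_ecn=on,guest_ufo=on,host_ufo=on")
    # rxq_txq controls --rxq/--txq on the vhost side; vm_config selects the
    # VM configuration used by VM().
    self.prepare_test_env(cbdma=True, no_pci=False, client_mode=True,
                          enable_queues=8, nb_cores=4, server_mode=True,
                          opt_queue=8, combined=True, rxq_txq=8,
                          vm_config='vm')
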
 tests/TestSuite_vm2vm_virtio_net_perf.py | 284 +++++++++++++++--------
 1 file changed, 184 insertions(+), 100 deletions(-)
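
For reference, start_vhost_testpmd() now omits --rxq/--txq when rxq_txq is
None; with rxq_txq=8 and nb_cores=4 the tail of the vhost testpmd command line
resembles the following (EAL and --vdev arguments depend on the testbed and
are elided):

    # params string assembled by start_vhost_testpmd() in the diff below
    params = " -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"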

diff --git a/tests/TestSuite_vm2vm_virtio_net_perf.py b/tests/TestSuite_vm2vm_virtio_net_perf.py
index b0733bc3..f7dd0d99 100644
--- a/tests/TestSuite_vm2vm_virtio_net_perf.py
+++ b/tests/TestSuite_vm2vm_virtio_net_perf.py
@@ -42,6 +42,7 @@ import re
 import time
 import string
 import random
+import utils
 from virt_common import VM
 from test_case import TestCase
 from pmd_output import PmdOutput
@@ -53,9 +54,7 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
         core_config = "1S/5C/1T"
         self.cores_list = self.dut.get_core_list(core_config, socket=self.ports_socket)
-        self.verify(len(self.cores_list) >= 4,
-                    "There has not enough cores to test this suite %s" %
-                    self.suite_name)
+        self.verify(len(self.cores_list) >= 4, "There are not enough cores to test this suite %s" % self.suite_name)
         self.vm_num = 2
         self.virtio_ip1 = "1.1.1.2"
         self.virtio_ip2 = "1.1.1.3"
@@ -68,12 +67,11 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.vhost = self.dut.new_session(suite="vhost")
         self.pmd_vhost = PmdOutput(self.dut, self.vhost)
         self.app_testpmd_path = self.dut.apps_name['test-pmd']
-        self.dut_ports = self.dut.get_ports()
-        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
         # get cbdma device
         self.cbdma_dev_infos = []
         self.dmas_info = None
         self.device_str = None
+        self.checked_vm = False
         self.dut.restore_interfaces()
 
     def set_up(self):
@@ -130,7 +128,7 @@ class TestVM2VMVirtioNetPerf(TestCase):
             self.dut.send_expect('./usertools/dpdk-devbind.py -u %s' % self.device_str, '# ', 30)
             self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=ioatdma  %s' % self.device_str, '# ', 60)
 
-    def start_vhost_testpmd(self, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, used_queues=1):
+    def start_vhost_testpmd(self, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, rxq_txq=None):
         """
         launch the testpmd with different parameters
         """
@@ -156,81 +154,67 @@ class TestVM2VMVirtioNetPerf(TestCase):
             vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d%s' " % (self.base_dir, enable_queues, cbdma_arg_0)
             vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d%s' " % (self.base_dir, enable_queues, cbdma_arg_1)
         eal_params = self.dut.create_eal_parameters(cores=self.cores_list, prefix='vhost', no_pci=no_pci)
-        params = " -- -i --nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d" % (nb_cores, used_queues, used_queues)
+        if rxq_txq is None:
+            params = " -- -i --nb-cores=%d --txd=1024 --rxd=1024" % nb_cores
+        else:
+            params = " -- -i --nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d" % (nb_cores, rxq_txq, rxq_txq)
         self.command_line = testcmd + eal_params + vdev1 + vdev2 + params
         self.pmd_vhost.execute_cmd(self.command_line, timeout=30)
         self.pmd_vhost.execute_cmd('vhost enable tx all', timeout=30)
         self.pmd_vhost.execute_cmd('start', timeout=30)
 
-    def start_vms(self, path_mode, server_mode=False, opt_queue=1):
+    def start_vms(self, server_mode=False, opt_queue=None, vm_config='vhost_sample'):
         """
         start two VM, each VM has one virtio device
         """
-        if path_mode == 1:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on"
-        elif path_mode == 2:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
-        elif path_mode == 4:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
-        elif path_mode == 5:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
-        elif path_mode == 6:
-            setting_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
-        elif path_mode == 10:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
-        elif path_mode == 11:
-            setting_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
-
         for i in range(self.vm_num):
             vm_dut = None
-            vm_info = VM(self.dut, 'vm%d' % i, 'vm')
+            vm_info = VM(self.dut, 'vm%d' % i, vm_config)
             vm_params = {}
             vm_params['driver'] = 'vhost-user'
             if not server_mode:
                 vm_params['opt_path'] = self.base_dir + '/vhost-net%d' % i
             else:
                 vm_params['opt_path'] = self.base_dir + '/vhost-net%d' % i + ',server'
-            vm_params['opt_queue'] = opt_queue
+            if opt_queue is not None:
+                vm_params['opt_queue'] = opt_queue
             vm_params['opt_mac'] = "52:54:00:00:00:0%d" % (i+1)
-            vm_params['opt_settings'] = setting_args
+            vm_params['opt_settings'] = self.vm_args
             vm_info.set_vm_device(**vm_params)
-            time.sleep(3)
             try:
                 vm_dut = vm_info.start(set_target=False)
                 if vm_dut is None:
                     raise Exception("Set up VM ENV failed")
             except Exception as e:
-                self.logger.error("Failure for %s" % str(e))
-                raise e
-            vm_dut.restore_interfaces()
-
+                print(utils.RED("Failure for %s" % str(e)))
+            self.verify(vm_dut is not None, "start vm failed")
             self.vm_dut.append(vm_dut)
             self.vm.append(vm_info)
 
-    def config_vm_env(self, combined=False, used_queues=1):
+    def config_vm_env(self, combined=False, rxq_txq=1):
         """
         set virtio device IP and run arp protocal
         """
         vm1_intf = self.vm_dut[0].ports_info[0]['intf']
         vm2_intf = self.vm_dut[1].ports_info[0]['intf']
         if combined:
-            self.vm_dut[0].send_expect("ethtool -L %s combined %d" % (vm1_intf, used_queues), "#", 10)
+            self.vm_dut[0].send_expect("ethtool -L %s combined %d" % (vm1_intf, rxq_txq), "#", 10)
         self.vm_dut[0].send_expect("ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10)
         if combined:
-            self.vm_dut[1].send_expect("ethtool -L %s combined %d" % (vm2_intf, used_queues), "#", 10)
+            self.vm_dut[1].send_expect("ethtool -L %s combined %d" % (vm2_intf, rxq_txq), "#", 10)
         self.vm_dut[1].send_expect("ifconfig %s %s" % (vm2_intf, self.virtio_ip2), "#", 10)
         self.vm_dut[0].send_expect("arp -s %s %s" % (self.virtio_ip2, self.virtio_mac2), "#", 10)
         self.vm_dut[1].send_expect("arp -s %s %s" % (self.virtio_ip1, self.virtio_mac1), "#", 10)
 
-    def prepare_test_env(self, path_mode, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
-                         server_mode=False, opt_queue=1, combined=False, used_queues=1):
+    def prepare_test_env(self, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
+                         server_mode=False, opt_queue=None, combined=False, rxq_txq=None, vm_config='vhost_sample'):
         """
         start vhost testpmd and qemu, and config the vm env
         """
         self.start_vhost_testpmd(cbdma=cbdma, no_pci=no_pci, client_mode=client_mode, enable_queues=enable_queues,
-                                 nb_cores=nb_cores, used_queues=used_queues)
-        self.start_vms(path_mode=path_mode, server_mode=server_mode, opt_queue=opt_queue)
-        self.config_vm_env(combined=combined, used_queues=used_queues)
+                                 nb_cores=nb_cores, rxq_txq=rxq_txq)
+        self.start_vms(server_mode=server_mode, opt_queue=opt_queue, vm_config=vm_config)
+        self.config_vm_env(combined=combined, rxq_txq=rxq_txq)
 
     def start_iperf(self, iperf_mode='tso'):
         """
@@ -332,11 +316,11 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.verify(tcp6_info is not None and tcp6_info.group(1) == "on",
                     "tx-tcp6-segmentation in vm not right")
 
-    def check_scp_file_valid_between_vms(self, file_size=1024):
+    def check_scp_file_valid_between_vms(self, file_size=1):
         """
         scp file form VM1 to VM2, check the data is valid
         """
-        # default file_size=1024K
+        # default file_size=1K
         data = ''
         for char in range(file_size * 1024):
             data += random.choice(self.random_string)
@@ -353,68 +337,63 @@ class TestVM2VMVirtioNetPerf(TestCase):
         md5_revd = md5_revd[: md5_revd.find(' ')]
         self.verify(md5_send == md5_revd, 'the received file is different with send file')
 
+    def bind_nic_driver(self, ports, driver=""):
+        if driver == "igb_uio":
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                driver = netdev.get_nic_driver()
+                if driver != 'igb_uio':
+                    netdev.bind_driver(driver='igb_uio')
+        else:
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                driver_now = netdev.get_nic_driver()
+                if driver == "":
+                    driver = netdev.default_driver
+                if driver != driver_now:
+                    netdev.bind_driver(driver=driver)
+
     def test_vm2vm_split_ring_iperf_with_tso(self):
         """
         TestCase1: VM2VM split ring vhost-user/virtio-net test with tcp traffic
         """
-        self.prepare_test_env(path_mode=1, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on"
+        self.prepare_test_env(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=1, combined=False, rxq_txq=None)
         self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
 
     def test_vm2vm_split_ring_with_tso_and_cbdma_enable(self):
         """
         TestCase2: VM2VM split ring vhost-user/virtio-net CBDMA enable test with tcp traffic
         """
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on"
         self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
-        self.prepare_test_env(path_mode=1, cbdma=True, no_pci=False, client_mode=False, enable_queues=1, nb_cores=2,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=1, combined=False, rxq_txq=None)
         cbdma_value = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         expect_value = self.get_suite_cfg()['expected_throughput'][self.running_case]
         self.verify(cbdma_value > expect_value, "CBDMA enable performance: %s is lower than CBDMA disable: %s." %(cbdma_value, expect_value))
 
-    def test_vm2vm_packed_ring_iperf_with_tso(self):
-        """
-        TestCase7: VM2VM packed ring vhost-user/virtio-net test with tcp traffic
-        """
-        self.prepare_test_env(path_mode=4, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
-        self.start_iperf_and_verify_vhost_xstats_info()
-
     def test_vm2vm_split_ring_iperf_with_ufo(self):
         """
         TestCase3: VM2VM split ring vhost-user/virtio-net test with udp traffic
         """
-        self.prepare_test_env(path_mode=2, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=1,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
+        self.prepare_test_env(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=1,
+                              server_mode=False, opt_queue=1, combined=False, rxq_txq=None)
         self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='ufo')
 
-    def test_vm2vm_packed_ring_iperf_with_ufo(self):
-        """
-        TestCase8: VM2VM packed ring vhost-user/virtio-net test with udp traffic
-        """
-        self.prepare_test_env(path_mode=4, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
-        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='other')
-
     def test_vm2vm_split_ring_device_capbility(self):
         """
         TestCase4: Check split ring virtio-net device capability
         """
-        self.start_vhost_testpmd(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, used_queues=1)
-        self.start_vms(path_mode=2)
-        self.offload_capbility_check(self.vm_dut[0])
-        self.offload_capbility_check(self.vm_dut[1])
-
-    def test_vm2vm_packed_ring_device_capbility(self):
-        """
-        TestCase9: Check packed ring virtio-net device capability
-        """
-        self.start_vhost_testpmd(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, used_queues=1)
-        self.start_vms(path_mode=4)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
+        self.start_vhost_testpmd(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, rxq_txq=None)
+        self.start_vms()
         self.offload_capbility_check(self.vm_dut[0])
         self.offload_capbility_check(self.vm_dut[1])
 
-    def test_vm2vm_split_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
+    def test_vm2vm_split_ring_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
         """
         TestCase5: VM2VM virtio-net split ring mergeable CBDMA enable test with large packet payload valid check
         """
@@ -423,24 +402,25 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
 
         self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
-        self.prepare_test_env(path_mode=5, cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
-                              server_mode=True, opt_queue=8, combined=True, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
+                              server_mode=True, opt_queue=8, combined=True, rxq_txq=8, vm_config='vm')
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Enable', 'mergeable path', 8, iperf_data_cbdma_enable_8_queue])
 
         self.logger.info("Re-launch without CBDMA and used 8 queue")
         self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=8)
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_disable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Disable','mergeable path', 8, iperf_data_cbdma_disable_8_queue])
 
         self.logger.info("Re-launch without CBDMA and used 1 queue")
         self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=1)
-        self.config_vm_env(combined=True, used_queues=1)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=1)
+        self.config_vm_env(combined=True, rxq_txq=1)
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Disable', 'mergeable path', 1, iperf_data_cbdma_disable_1_queue])
 
@@ -453,7 +433,7 @@ class TestVM2VMVirtioNetPerf(TestCase):
                     "CMDMA enable: %s is lower than CBDMA disable: %s" % (
                         iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue))
 
-    def test_vm2vm_split_ring_with_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
+    def test_vm2vm_split_ring_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
         """
         TestCase6: VM2VM virtio-net split ring non-mergeable CBDMA enable test with large packet payload valid check
         """
@@ -462,24 +442,25 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
 
         self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
-        self.prepare_test_env(path_mode=6, cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
-                              server_mode=True, opt_queue=8, combined=True, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
+                              server_mode=True, opt_queue=8, combined=True, rxq_txq=8, vm_config='vm')
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Enable', 'no-mergeable path', 8, iperf_data_cbdma_enable_8_queue])
 
         self.logger.info("Re-launch without CBDMA and used 8 queue")
         self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=8)
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_disable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Disable','no-mergeable path', 8, iperf_data_cbdma_disable_8_queue])
 
         self.logger.info("Re-launch without CBDMA and used 1 queue")
         self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=1)
-        self.config_vm_env(combined=True, used_queues=1)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=1)
+        self.config_vm_env(combined=True, rxq_txq=1)
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Disable','no-mergeable path', 1, iperf_data_cbdma_disable_1_queue])
 
@@ -492,21 +473,123 @@ class TestVM2VMVirtioNetPerf(TestCase):
                     "CMDMA enable: %s is lower than CBDMA disable: %s" % (
                         iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue))
 
+    def test_vm2vm_packed_ring_iperf_with_tso(self):
+        """
+        TestCase7: VM2VM packed ring vhost-user/virtio-net test with tcp traffic
+        """
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
+        self.prepare_test_env(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=1, combined=False, rxq_txq=None)
+        self.start_iperf_and_verify_vhost_xstats_info()
+
+    def test_vm2vm_packed_ring_iperf_with_tso_and_cbdma_enable(self):
+        """
+        TestCase8: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic
+        """
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=None, combined=False, rxq_txq=None)
+        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='other')
+
+    def test_vm2vm_packed_ring_device_capbility(self):
+        """
+        TestCase9: Check packed ring virtio-net device capability
+        """
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
+        self.prepare_test_env(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=None, combined=False, rxq_txq=None)
+        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='ufo')
+
     def test_vm2vm_packed_ring_mergeable_path_check_large_packet(self):
         """
         TestCase10: VM2VM packed ring virtio-net mergeable with large packet payload valid check
         """
-        self.prepare_test_env(path_mode=10, cbdma=False, no_pci=True, client_mode=True, enable_queues=8, nb_cores=4,
-                              server_mode=True, opt_queue=8, combined=True, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
+        self.start_vhost_testpmd(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, rxq_txq=None)
+        self.start_vms()
+        self.offload_capbility_check(self.vm_dut[0])
+        self.offload_capbility_check(self.vm_dut[1])
+
+    def test_vm2vm_packed_ring_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
+        """
+        Test Case 11: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable test with large packet payload valid check
+        """
+        # This test case need to use QEMU 3.0 to test
+        ipef_result = []
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+
+        self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
+        self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
+                              server_mode=True, opt_queue=8, combined=True, rxq_txq=8, vm_config='vm')
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Enable', 'mergeable path', 8, iperf_data_cbdma_enable_8_queue])
+
+        self.logger.info("Re-launch without CBDMA and used 8 queue")
+        self.vhost.send_expect("quit", "# ", 30)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=8)
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_disable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Disable', 'mergeable path', 8, iperf_data_cbdma_disable_8_queue])
+
+        self.logger.info("Re-launch without CBDMA and used 1 queue")
+        self.vhost.send_expect("quit", "# ", 30)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=1)
+        self.config_vm_env(combined=True, rxq_txq=1)
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Disable', 'mergeable path', 1, iperf_data_cbdma_disable_1_queue])
+
+        self.table_header = ['CBDMA Enable/Disable', 'Mode', 'rxq/txq', 'Gbits/sec']
+        self.result_table_create(self.table_header)
+        for table_row in ipef_result:
+            self.result_table_add(table_row)
+        self.result_table_print()
+        self.verify(iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_8_queue, \
+                    "CMDMA enable: %s is lower than CBDMA disable: %s" % (
+                        iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue))
 
-    def test_vm2vm_packed_ring_no_mergeable_path_check_large_packet(self):
+    def test_vm2vm_packed_ring_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
         """
-        TestCase11: VM2VM packed ring virtio-net non-mergeable with large packet payload valid check
+        Test Case 12: VM2VM virtio-net packed ring non-mergeable 8 queues CBDMA enable test with large packet payload valid check
         """
-        self.prepare_test_env(path_mode=11, cbdma=False, no_pci=True, client_mode=True, enable_queues=8, nb_cores=4,
-                              server_mode=True, opt_queue=8, combined=True, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        # This test case need to use QEMU 3.0 to test
+        ipef_result = []
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+
+        self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
+        self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
+                              server_mode=True, opt_queue=8, combined=True, rxq_txq=8, vm_config='vm')
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Enable', 'no-mergeable path', 8, iperf_data_cbdma_enable_8_queue])
+
+        self.logger.info("Re-launch without CBDMA and used 8 queue")
+        self.vhost.send_expect("quit", "# ", 30)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=8)
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_disable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Disable', 'no-mergeable path', 8, iperf_data_cbdma_disable_8_queue])
+
+        self.logger.info("Re-launch without CBDMA and used 1 queue")
+        self.vhost.send_expect("quit", "# ", 30)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=1)
+        self.config_vm_env(combined=True, rxq_txq=1)
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Disable', 'no-mergeable path', 1, iperf_data_cbdma_disable_1_queue])
+
+        self.table_header = ['CBDMA Enable/Disable', 'Mode', 'rxq/txq', 'Gbits/sec']
+        self.result_table_create(self.table_header)
+        for table_row in ipef_result:
+            self.result_table_add(table_row)
+        self.result_table_print()
+        self.verify(iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_8_queue, \
+                    "CMDMA enable: %s is lower than CBDMA disable: %s" % (
+                        iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue))
 
     def tear_down(self):
         """
@@ -520,5 +603,6 @@ class TestVM2VMVirtioNetPerf(TestCase):
         """
         Run after each test suite.
         """
+        self.bind_nic_driver(self.dut_ports, self.drivername)
         if getattr(self, 'vhost', None):
             self.dut.close_session(self.vhost)
-- 
2.25.1


Thread overview: 4+ messages
2021-04-02  6:16 Ling Wei [this message]
2021-04-02  6:19 ` Ling, WeiX
2021-04-12  2:51   ` Wang, Yinan
2021-04-07  1:31 ` Tu, Lijuan
