test suite reviews and discussions
* [dts] [PATCH V1] tests/vswitch_sample_cbdma: modify case and add case
@ 2021-07-09 10:52 Xiang An
  2021-07-09 14:07 ` Wang, Yinan
  0 siblings, 1 reply; 4+ messages in thread
From: Xiang An @ 2021-07-09 10:52 UTC (permalink / raw)
  To: dts; +Cc: yinan.wang, Xiang An

Based on the newly updated test plan, modify the existing cases and add new cases.

Signed-off-by: Xiang An <xiangx.an@intel.com>
---
 tests/TestSuite_vswitch_sample_cbdma.py | 590 +++++++++++++++++++++-----------
 1 file changed, 384 insertions(+), 206 deletions(-)
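
The main functional change is that start_virtio_testpmd() no longer takes
server_mode/vectorized_path flags; it now composes the virtio-user --vdev
string from per-case attributes (mrg_rxbuf, in_order, vectorized, packed_vq,
server) that each test sets before launch. A minimal standalone sketch of
that composition logic, assuming illustrative MAC and device id values that
are not taken from the suite:

    def build_virtio_user_vdev(dev_mac, dev_id, queues=1, mrg_rxbuf=0,
                               in_order=0, vectorized=0, packed_vq=0,
                               server=0):
        # the base vdev string always carries mrg_rxbuf and in_order
        vdev = ("--vdev=net_virtio_user0,mac={},path=./vhost-net{},"
                "queues={},mrg_rxbuf={},in_order={}".format(
                    dev_mac, dev_id, queues, mrg_rxbuf, in_order))
        # optional features are appended only when a case enables them
        if vectorized == 1:
            vdev += ",vectorized=1"
        if packed_vq == 1:
            vdev += ",packed_vq=1"
        if server:
            vdev += ",server=1"
        return vdev

    # e.g. the packed ring in_order vectorized client used by test case 1
    print(build_virtio_user_vdev("00:11:22:33:44:10", 0, mrg_rxbuf=0,
                                 in_order=1, vectorized=1, packed_vq=1,
                                 server=1))

In the suite itself these attributes are instance fields, and
--single-file-segments is still appended separately for 2M hugepage
environments.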

diff --git a/tests/TestSuite_vswitch_sample_cbdma.py b/tests/TestSuite_vswitch_sample_cbdma.py
index df3ab6f..08422ea 100644
--- a/tests/TestSuite_vswitch_sample_cbdma.py
+++ b/tests/TestSuite_vswitch_sample_cbdma.py
@@ -42,6 +42,9 @@ from packet import Packet
 from pktgen import PacketGeneratorHelper
 from pmd_output import PmdOutput
 from virt_common import VM
+from settings import HEADER_SIZE
+import random
+import string
 
 
 class TestVswitchSampleCBDMA(TestCase):
@@ -51,13 +54,12 @@ class TestVswitchSampleCBDMA(TestCase):
         Run at the start of each test suite.
         """
         self.set_max_queues(512)
+        self.build_vhost_app()
+        self.tester_tx_port_num = 1
         self.dut_ports = self.dut.get_ports()
         self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
-        self.tester_tx_port_num = 1
         self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
-        self.dut_ports = self.dut.get_ports()
-        self.socket = self.dut.get_numa_id(self.dut_ports[0])
-        self.cores = self.dut.get_core_list("all", socket=self.socket)
+        self.cores = self.dut.get_core_list("all", socket=self.ports_socket)
         self.vhost_core_list = self.cores[0:2]
         self.vuser0_core_list = self.cores[2:4]
         self.vuser1_core_list = self.cores[4:6]
@@ -89,6 +91,15 @@ class TestVswitchSampleCBDMA(TestCase):
         self.virtio_user1 = self.dut.new_session(suite="virtio-user1")
         self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0)
         self.virtio_user1_pmd = PmdOutput(self.dut, self.virtio_user1)
+        self.mrg_rxbuf = 0
+        self.in_order = 0
+        self.vectorized = 0
+        self.packed_vq = 0
+        self.server = 0
+        self.random_string = string.ascii_letters + string.digits
+        self.virtio_ip0 = "1.1.1.2"
+        self.virtio_ip1 = "1.1.1.3"
+        self.headers_size = HEADER_SIZE['eth'] + HEADER_SIZE['ip']
 
     def set_up(self):
         """
@@ -97,11 +108,6 @@ class TestVswitchSampleCBDMA(TestCase):
         self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
         self.dut.send_expect("killall -I qemu-system-x86_64", '#', 20)
 
-    def set_async_threshold(self, async_threshold=256):
-        self.logger.info("Configure async_threshold to {}".format(async_threshold))
-        self.dut.send_expect("sed -i -e 's/f.async_threshold = .*$/f.async_threshold = {};/' "
-                             "./examples/vhost/main.c".format(async_threshold), "#", 20)
-
     def set_max_queues(self, max_queues=512):
         self.logger.info("Configure MAX_QUEUES to {}".format(max_queues))
         self.dut.send_expect("sed -i -e 's/#define MAX_QUEUES .*$/#define MAX_QUEUES {}/' "
@@ -147,32 +153,26 @@ class TestVswitchSampleCBDMA(TestCase):
         # After started dpdk-vhost app, wait 3 seconds
         time.sleep(3)
 
-    def start_virtio_testpmd(self, pmd_session, dev_mac, dev_id, cores, prefix,  enable_queues=1, server_mode=False,
-                             vectorized_path=False, nb_cores=1, used_queues=1):
+    def start_virtio_testpmd(self, pmd_session, dev_mac, dev_id, cores, prefix, enable_queues=1, nb_cores=1,
+                             used_queues=1):
         """
         launch the testpmd as virtio with vhost_net0
         """
-        if server_mode:
-            if vectorized_path:
-                eal_params = " --vdev=net_virtio_user0,mac={},path=./vhost-net{},queues={},server=1," \
-                             "mrg_rxbuf=0,in_order=1,vectorized=1".format(dev_mac, dev_id, enable_queues)
-            else:
-                eal_params = " --vdev=net_virtio_user0,mac={},path=./vhost-net{},queues={},server=1"\
-                    .format(dev_mac, dev_id, enable_queues)
-        else:
-            if vectorized_path:
-                eal_params = " --vdev=net_virtio_user0,mac={},path=./vhost-net{},queues={}," \
-                             "mrg_rxbuf=0,in_order=1,vectorized=1".format(dev_mac, dev_id, enable_queues)
-            else:
-                eal_params = " --vdev=net_virtio_user0,mac={},path=./vhost-net{},queues={}"\
-                    .format(dev_mac, dev_id, enable_queues)
+        eal_params = " --vdev=net_virtio_user0,mac={},path=./vhost-net{},queues={},mrg_rxbuf={},in_order={}" \
+            .format(dev_mac, dev_id, enable_queues, self.mrg_rxbuf, self.in_order)
+        if self.vectorized == 1:
+            eal_params = eal_params + ",vectorized=1"
+        if self.packed_vq == 1:
+            eal_params = eal_params + ',packed_vq=1'
+        if self.server:
+            eal_params = eal_params + ",server=1"
         if self.check_2M_env:
             eal_params += " --single-file-segments"
         params = "--rxq={} --txq={} --txd=1024 --rxd=1024 --nb-cores={}".format(used_queues, used_queues, nb_cores)
         pmd_session.start_testpmd(cores=cores, param=params, eal_param=eal_params, no_pci=True, ports=[], prefix=prefix,
                                   fixed_prefix=True)
 
-    def start_vms(self, mode=0, mergeable=True):
+    def start_vms(self, mode=0, mergeable=True, server_mode=False, set_target=True):
         """
         start two VM, each VM has one virtio device
         """
@@ -181,7 +181,7 @@ class TestVswitchSampleCBDMA(TestCase):
         elif mode == 1:
             setting_args = "disable-modern=false"
         elif mode == 2:
-            setting_args = "disable-modern=false,packed=on"
+            setting_args = "disable-modern=true,packed=on"
         if mergeable is True:
             setting_args += "," + "mrg_rxbuf=on"
         else:
@@ -193,13 +193,16 @@ class TestVswitchSampleCBDMA(TestCase):
             vm_info = VM(self.dut, 'vm%d' % i, 'vhost_sample')
             vm_params = {}
             vm_params['driver'] = 'vhost-user'
-            vm_params['opt_path'] = self.base_dir + '/vhost-net%d' % i
-            vm_params['opt_mac'] = "52:54:00:00:00:0%d" % (i+1)
+            if server_mode:
+                vm_params['opt_path'] = self.base_dir + '/vhost-net%d' % i + ',server'
+            else:
+                vm_params['opt_path'] = self.base_dir + '/vhost-net%d' % i
+            vm_params['opt_mac'] = "52:54:00:00:00:0%d" % (i + 1)
             vm_params['opt_settings'] = setting_args
             vm_info.set_vm_device(**vm_params)
             time.sleep(3)
             try:
-                vm_dut = vm_info.start()
+                vm_dut = vm_info.start(set_target=set_target)
                 if vm_dut is None:
                     raise Exception("Set up VM ENV failed")
             except Exception as e:
@@ -212,7 +215,7 @@ class TestVswitchSampleCBDMA(TestCase):
         """
         launch the testpmd in vm
         """
-        self.vm_cores = [1,2]
+        self.vm_cores = [1, 2]
         param = "--rxq=1 --txq=1 --nb-cores=1 --txd=1024 --rxd=1024"
         pmd_session.start_testpmd(cores=self.vm_cores, param=param)
 
@@ -255,7 +258,8 @@ class TestVswitchSampleCBDMA(TestCase):
             dmas_info += dmas
         self.dmas_info = dmas_info[:-1]
         self.device_str = ' '.join(used_cbdma)
-        self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=%s %s' % (self.drivername, self.device_str), '# ', 60)
+        self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=%s %s' % (self.drivername, self.device_str),
+                             '# ', 60)
 
     def send_vlan_packet(self, dts_mac, pkt_size=64, pkt_count=1):
         """
@@ -281,23 +285,23 @@ class TestVswitchSampleCBDMA(TestCase):
         tgen_input = []
         rx_port = self.tester.get_local_port(self.dut_ports[0])
         tx_port = self.tester.get_local_port(self.dut_ports[0])
-        for item in range(port_num):
-            for dst_mac in dst_mac_list:
-                pkt = Packet(pkt_type='VLAN_UDP', pkt_len=frame_size)
-                pkt.config_layer('ether', {'dst': dst_mac})
-                pkt.config_layer('vlan', {'vlan': 1000})
-                pcap = os.path.join(self.out_path, "vswitch_sample_cbdma_%s_%s_%s.pcap" % (item, dst_mac, frame_size))
-                pkt.save_pcapfile(None, pcap)
-                tgen_input.append((rx_port, tx_port, pcap))
+        for dst_mac in dst_mac_list:
+            payload_size = frame_size - self.headers_size
+            pkt = Packet(pkt_type='VLAN_UDP', pkt_len=payload_size)
+            pkt.config_layer('ether', {'dst': dst_mac})
+            pkt.config_layer('vlan', {'vlan': 1000})
+            pcap = os.path.join(self.out_path, "vswitch_sample_cbdma_%s_%s.pcap" % (dst_mac, frame_size))
+            pkt.save_pcapfile(self.tester, pcap)
+            tgen_input.append((rx_port, tx_port, pcap))
         return tgen_input
 
-    def perf_test(self, frame_size, dst_mac_list):
+    def perf_test(self, frame_sizes, dst_mac_list):
         # Create test results table
         table_header = ['Frame Size(Byte)', 'Throughput(Mpps)']
         self.result_table_create(table_header)
         # Begin test perf
         test_result = {}
-        for frame_size in frame_size:
+        for frame_size in frame_sizes:
             self.logger.info("Test running at parameters: " + "framesize: {}".format(frame_size))
             tgenInput = self.config_stream(frame_size, self.tester_tx_port_num, dst_mac_list)
             # clear streams before add new streams
@@ -307,130 +311,173 @@ class TestVswitchSampleCBDMA(TestCase):
             # set traffic option
             traffic_opt = {'duration': 5}
             _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
-            self.verify(pps > 0, "No traffic detected")
+            self.virtio_user0_pmd.execute_cmd('show port stats all')
             throughput = pps / 1000000.0
+            self.verify(throughput > 0, "No traffic detected")
             test_result[frame_size] = throughput
             self.result_table_add([frame_size, throughput])
         self.result_table_print()
         return test_result
 
-    def pvp_test_with_cbdma(self, socket_num=1, with_cbdma=True, cbdma_num=1):
-        self.frame_sizes = [64, 512, 1024, 1518]
-        self.start_vhost_app(with_cbdma=with_cbdma, cbdma_num=cbdma_num, socket_num=socket_num, client_mode=False)
-        self.start_virtio_testpmd(pmd_session=self.virtio_user0_pmd, dev_mac=self.virtio_dst_mac0, dev_id=0,
-                                  cores=self.vuser0_core_list, prefix='testpmd0', enable_queues=1, server_mode=False,
-                                  vectorized_path=False, nb_cores=1, used_queues=1)
+    def pvp_test_with_cbdma(self):
+        frame_sizes = [64, 128, 256, 512, 1024, 1280, 1518]
         self.virtio_user0_pmd.execute_cmd('set fwd mac')
         self.virtio_user0_pmd.execute_cmd('start tx_first')
         self.virtio_user0_pmd.execute_cmd('stop')
+        self.virtio_user0_pmd.execute_cmd('set fwd mac')
         self.virtio_user0_pmd.execute_cmd('start')
         dst_mac_list = [self.virtio_dst_mac0]
-        perf_result = self.perf_test(frame_size=self.frame_sizes,dst_mac_list=dst_mac_list)
+        perf_result = self.perf_test(frame_sizes, dst_mac_list)
         return perf_result
 
-    def test_perf_check_with_cbdma_channel_using_vhost_async_driver(self):
+    def test_perf_pvp_check_with_cbdma_channel_using_vhost_async_driver(self):
         """
         Test Case1: PVP performance check with CBDMA channel using vhost async driver
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(1)
-
-        # test cbdma copy
-        # CBDMA copy needs vhost enqueue with cbdma channel using parameter '-dmas'
-        self.set_async_threshold(1518)
-        self.build_vhost_app()
-        cbmda_copy = self.pvp_test_with_cbdma(socket_num=1, with_cbdma=True, cbdma_num=1)
-        self.virtio_user0_pmd.execute_cmd("quit", "#")
-        self.vhost_user.send_expect("^C", "# ", 20)
-
-        # test sync copy
-        # Sync copy needs vhost enqueue with cbdma channel, but threshold ( can be adjusted by change value of
-        # f.async_threshold in dpdk code) is larger than forwarding packet length
-        self.set_async_threshold(0)
-        self.build_vhost_app()
-        sync_copy = self.pvp_test_with_cbdma(socket_num=1, with_cbdma=True, cbdma_num=1)
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+
+        self.start_vhost_app(with_cbdma=True, cbdma_num=1, socket_num=1, client_mode=True)
+        # launch packed ring in_order vectorized with cbdma
+        self.mrg_rxbuf = 0
+        self.in_order = 1
+        self.vectorized = 1
+        self.packed_vq = 1
+        self.server = 1
+        self.start_virtio_testpmd(pmd_session=self.virtio_user0_pmd, dev_mac=self.virtio_dst_mac0, dev_id=0,
+                                  cores=self.vuser0_core_list, prefix='testpmd0', nb_cores=1, used_queues=1)
+        packed_ring_result = self.pvp_test_with_cbdma()
+
+        # relaunch split ring in_order vectorized with cbdma
+        self.mrg_rxbuf = 1
+        self.in_order = 1
+        self.vectorized = 1
+        self.packed_vq = 0
+        self.server = 1
         self.virtio_user0_pmd.execute_cmd("quit", "#")
-        self.vhost_user.send_expect("^C", "# ", 20)
-
-        # test CPU copy
-        # CPU copy means vhost enqueue w/o cbdma channel
-        cpu_copy = self.pvp_test_with_cbdma(socket_num=1, with_cbdma=False, cbdma_num=0)
+        self.start_virtio_testpmd(pmd_session=self.virtio_user0_pmd, dev_mac=self.virtio_dst_mac0, dev_id=0,
+                                  cores=self.vuser0_core_list, prefix='testpmd0', nb_cores=1, used_queues=1)
+        split_ring_result = self.pvp_test_with_cbdma()
         self.virtio_user0_pmd.execute_cmd("quit", "#")
         self.vhost_user.send_expect("^C", "# ", 20)
 
         self.table_header = ['Frame Size(Byte)', 'Mode', 'Throughput(Mpps)']
         self.result_table_create(self.table_header)
-        for key in cbmda_copy.keys():
-            perf_result.append([key, 'cbdma_copy', cbmda_copy[key]])
-        for key in sync_copy.keys():
-            perf_result.append([key, 'sync_copy', sync_copy[key]])
-        for key in cpu_copy.keys():
-            perf_result.append([key, 'cpu_copy', cpu_copy[key]])
+        for key in packed_ring_result.keys():
+            perf_result.append([key, 'packed_ring', packed_ring_result[key]])
+        for key in split_ring_result.keys():
+            perf_result.append([key, 'split_ring', split_ring_result[key]])
         for table_row in perf_result:
             self.result_table_add(table_row)
         self.result_table_print()
+        self.virtio_user0_pmd.execute_cmd("quit", "#")
+        self.vhost_user.send_expect("^C", "# ", 20)
+
+    def config_stream_imix(self, frame_sizes, port_num, dst_mac_list):
+        tgen_input = []
+        rx_port = self.tester.get_local_port(self.dut_ports[0])
+        tx_port = self.tester.get_local_port(self.dut_ports[0])
+        for dst_mac in dst_mac_list:
+            for frame_size in frame_sizes:
+                payload_size = frame_size - self.headers_size
+                pkt = Packet()
+                pkt.assign_layers(['ether', 'ipv4', 'raw'])
+                pkt.config_layers([('ether', {'dst': '%s' % dst_mac}), ('ipv4', {'src': '1.1.1.1'}),
+                                   ('raw', {'payload': ['01'] * payload_size})])
+                pcap = os.path.join(self.out_path, "vswitch_sample_cbdma_%s_%s.pcap" % (dst_mac, frame_size))
+                pkt.save_pcapfile(self.tester, pcap)
+                tgen_input.append((rx_port, tx_port, pcap))
+        return tgen_input
 
-    def pvp_test_with_multi_cbdma(self, socket_num=2, with_cbdma=True, cbdma_num=1, launch_virtio=True, quit_vhost=False):
-        self.frame_sizes = [1518]
-        self.start_vhost_app(with_cbdma=with_cbdma, cbdma_num=cbdma_num, socket_num=socket_num, client_mode=True)
-        if launch_virtio:
-            self.start_virtio_testpmd(pmd_session=self.virtio_user0_pmd, dev_mac=self.virtio_dst_mac0, dev_id=0,
-                                      cores=self.vuser0_core_list, prefix='testpmd0', enable_queues=1, server_mode=True,
-                                      nb_cores=1, used_queues=1)
-            self.start_virtio_testpmd(pmd_session=self.virtio_user1_pmd, dev_mac=self.virtio_dst_mac1, dev_id=1,
-                                      cores=self.vuser1_core_list, prefix='testpmd1', enable_queues=1, server_mode=True,
-                                      vectorized_path=True, nb_cores=1, used_queues=1)
-            self.virtio_user0_pmd.execute_cmd('set fwd mac')
-            self.virtio_user0_pmd.execute_cmd('start tx_first')
+    def perf_test_imix(self, frame_sizes, dst_mac_list):
+        # Create test results table
+        table_header = ['Frame Size(Byte)', 'Throughput(Mpps)']
+        self.result_table_create(table_header)
+        # Begin test perf
+        test_result = {}
+        tgenInput = self.config_stream_imix(frame_sizes, self.tester_tx_port_num, dst_mac_list)
+        fields_config = {'ip': {'src': {'action': 'random'}, }, }
+        # clear streams before add new streams
+        self.tester.pktgen.clear_streams()
+        # run packet generator
+        streams = self.pktgen_helper.prepare_stream_from_tginput(tgenInput, 100, fields_config, self.tester.pktgen)
+        # set traffic option
+        traffic_opt = {'delay': 5, 'duration': 5}
+        _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
+        self.virtio_user0_pmd.execute_cmd("show port stats all")
+        self.virtio_user1_pmd.execute_cmd("show port stats all")
+        throughput = pps / 1000000.0
+        self.verify(throughput > 0, "traffic is too low: throughput=%s" % throughput)
+        test_result['imix'] = throughput
+        self.result_table_add(['imix', throughput])
+        self.result_table_print()
+        return test_result
+
+    def pvp_test_with_multi_cbdma(self, relaunch=False):
+        frame_sizes = [64, 128, 256, 512, 1024, 1280, 1518]
+        if relaunch:
             self.virtio_user0_pmd.execute_cmd('stop')
-            self.virtio_user0_pmd.execute_cmd('start')
-            self.virtio_user1_pmd.execute_cmd('set fwd mac')
-            self.virtio_user1_pmd.execute_cmd('start tx_first')
             self.virtio_user1_pmd.execute_cmd('stop')
-            self.virtio_user1_pmd.execute_cmd('start')
-        else:
-            self.virtio_user0_pmd.execute_cmd('stop', 'testpmd> ', 30)
-            self.virtio_user0_pmd.execute_cmd('start tx_first', 'testpmd> ', 30)
-            self.virtio_user1_pmd.execute_cmd('stop', 'testpmd> ', 30)
-            self.virtio_user1_pmd.execute_cmd('start tx_first', 'testpmd> ', 30)
+            self.virtio_user0_pmd.execute_cmd('clear port stats all')
+            self.virtio_user1_pmd.execute_cmd('clear port stats all')
+        self.virtio_user0_pmd.execute_cmd('set fwd mac')
+        self.virtio_user1_pmd.execute_cmd('set fwd mac')
+        self.virtio_user0_pmd.execute_cmd('start tx_first')
+        self.virtio_user1_pmd.execute_cmd('start tx_first')
         dst_mac_list = [self.virtio_dst_mac0, self.virtio_dst_mac1]
-        perf_result = self.perf_test(self.frame_sizes, dst_mac_list)
-        if quit_vhost:
-            self.vhost_user.send_expect("^C", "# ", 20)
+        perf_result = self.perf_test_imix(frame_sizes, dst_mac_list)
+        out0 = self.virtio_user0_pmd.execute_cmd('show port stats all')
+        out1 = self.virtio_user1_pmd.execute_cmd('show port stats all')
+        rx_num0 = re.compile('RX-packets: (\d+)').findall(out0)
+        rx_num1 = re.compile('RX-packets: (\d+)').findall(out1)
+        self.verify(int(rx_num0[0]) > 32, 'virtio-user0 did not receive packets from the tester')
+        self.verify(int(rx_num1[0]) > 32, 'virtio-user1 did not receive packets from the tester')
         return perf_result
 
-    def test_perf_check_with_multiple_cbdma_channels_using_vhost_async_driver(self):
+    def test_perf_pvp_test_with_two_vm_and_two_cbdma_channels_using_vhost_async_driver(self):
         """
-        Test Case2: PVP test with multiple CBDMA channels using vhost async driver
+        Test Case2: PVP test with two VM and two CBDMA channels using vhost async driver
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(2)
-        self.set_async_threshold(256)
-        self.build_vhost_app()
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
 
         self.logger.info("Launch vhost app perf test")
-        before_relunch= self.pvp_test_with_multi_cbdma(socket_num=2, with_cbdma=True, cbdma_num=2, launch_virtio=True, quit_vhost=True)
+        self.start_vhost_app(with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True)
+        self.mrg_rxbuf = 0
+        self.in_order = 1
+        self.vectorized = 1
+        self.packed_vq = 0
+        self.server = 1
+        self.start_virtio_testpmd(pmd_session=self.virtio_user0_pmd, dev_mac=self.virtio_dst_mac0, dev_id=0,
+                                  cores=self.vuser0_core_list, prefix='testpmd0', nb_cores=1, used_queues=1)
+        self.mrg_rxbuf = 1
+        self.in_order = 1
+        self.vectorized = 1
+        self.packed_vq = 0
+        self.server = 1
+        self.start_virtio_testpmd(pmd_session=self.virtio_user1_pmd, dev_mac=self.virtio_dst_mac1, dev_id=1,
+                                  cores=self.vuser1_core_list, prefix='testpmd1', nb_cores=1, used_queues=1)
+        before_relunch = self.pvp_test_with_multi_cbdma()
 
         self.logger.info("Relaunch vhost app perf test")
-        after_relunch = self.pvp_test_with_multi_cbdma(socket_num=2, with_cbdma=True, cbdma_num=2, launch_virtio=False, quit_vhost=False)
-        self.virtio_user0_pmd.execute_cmd("quit", "#")
-        self.virtio_user1_pmd.execute_cmd("quit", "#")
         self.vhost_user.send_expect("^C", "# ", 20)
+        self.start_vhost_app(with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True)
+        after_relunch = self.pvp_test_with_multi_cbdma(relaunch=True)
 
         self.table_header = ['Frame Size(Byte)', 'Mode', 'Throughput(Mpps)']
         self.result_table_create(self.table_header)
         for key in before_relunch.keys():
-            perf_result.append([key, 'Before Re-launch vhost', before_relunch[key]])
+            perf_result.append(['imix', 'Before Re-launch vhost', before_relunch[key]])
         for key in after_relunch.keys():
-            perf_result.append([key, 'After Re-launch vhost', after_relunch[key]])
+            perf_result.append(['imix', 'After Re-launch vhost', after_relunch[key]])
         for table_row in perf_result:
             self.result_table_add(table_row)
         self.result_table_print()
+        self.virtio_user0_pmd.execute_cmd("quit", "#")
+        self.virtio_user1_pmd.execute_cmd("quit", "#")
+        self.vhost_user.send_expect("^C", "# ", 20)
 
-        self.verify(abs(before_relunch[1518] - after_relunch[1518]) / before_relunch[1518] < 0.1, "Perf is unstable, \
-        before relaunch vhost app: %s, after relaunch vhost app: %s" % (before_relunch[1518], after_relunch[1518]))
-
-    def get_receive_throughput(self, pmd_session, count=5):
+    def get_receive_throughput(self, pmd_session, count=10):
         i = 0
         while i < count:
             pmd_session.execute_cmd('show port stats all')
@@ -453,18 +500,22 @@ class TestVswitchSampleCBDMA(TestCase):
         pmd_session.execute_cmd('set eth-peer 0 %s' % eth_peer_mac)
 
     def send_pkts_from_testpmd1(self, pmd_session, pkt_len):
-        pmd_session.execute_cmd('set txpkts %s' % pkt_len)
+        pmd_session.execute_cmd('stop')
+        if pkt_len in [64, 2000]:
+            pmd_session.execute_cmd('set txpkts %s' % pkt_len)
+        elif pkt_len == 8000:
+            pmd_session.execute_cmd('set txpkts 2000,2000,2000,2000')
+        elif pkt_len == 'imix':
+            pmd_session.execute_cmd('set txpkts 64,256,2000,64,256,2000')
         pmd_session.execute_cmd('start tx_first')
 
-    def vm2vm_check_with_two_cbdma(self, with_cbdma=True, cbdma_num=2, socket_num=2):
-        frame_sizes = [256, 2000]
-        self.start_vhost_app(with_cbdma=with_cbdma, cbdma_num=cbdma_num, socket_num=socket_num, client_mode=False)
-        self.start_virtio_testpmd(pmd_session=self.virtio_user0_pmd, dev_mac=self.virtio_dst_mac0, dev_id=0,
-                                  cores=self.vuser0_core_list, prefix='testpmd0', enable_queues=1, server_mode=False,
-                                  nb_cores=1, used_queues=1)
-        self.start_virtio_testpmd(pmd_session=self.virtio_user1_pmd, dev_mac=self.virtio_dst_mac1, dev_id=1,
-                                  cores=self.vuser1_core_list, prefix='testpmd1', enable_queues=1, server_mode=False,
-                                  vectorized_path=True, nb_cores=1, used_queues=1)
+    def vm2vm_check_with_two_cbdma(self, relaunch=False):
+        frame_sizes = [64, 2000, 8000, 'imix']
+        if relaunch:
+            self.virtio_user0_pmd.execute_cmd('stop')
+            self.virtio_user0_pmd.execute_cmd('clear port stats all')
+            self.virtio_user1_pmd.execute_cmd('stop')
+            self.virtio_user1_pmd.execute_cmd('clear port stats all')
         self.set_testpmd0_param(self.virtio_user0_pmd, self.virtio_dst_mac1)
         self.set_testpmd1_param(self.virtio_user1_pmd, self.virtio_dst_mac0)
 
@@ -480,59 +531,52 @@ class TestVswitchSampleCBDMA(TestCase):
             self.result_table_print()
         return rx_throughput
 
-    def test_vm2vm_check_with_two_cbdma_channels_using_vhost_async_driver(self):
+    def test_vm2vm_fwd_test_with_two_cbdma_channels(self):
         """
-        Test Case3: VM2VM performance test with two CBDMA channels using vhost async driver
+        Test Case3: VM2VM forwarding test with two CBDMA channels
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(2)
-        self.set_async_threshold(256)
-        self.build_vhost_app()
-
-        cbdma_enable = self.vm2vm_check_with_two_cbdma(with_cbdma=True, cbdma_num=2, socket_num=2)
-        self.virtio_user0_pmd.execute_cmd("quit", "#")
-        self.virtio_user1_pmd.execute_cmd("quit", "#")
-        self.vhost_user.send_expect("^C", "# ", 20)
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
 
-        cbdma_disable = self.vm2vm_check_with_two_cbdma(with_cbdma=False, cbdma_num=2, socket_num=2)
-        self.virtio_user0_pmd.execute_cmd("quit", "#")
-        self.virtio_user1_pmd.execute_cmd("quit", "#")
+        self.logger.info("Launch vhost app perf test")
+        self.start_vhost_app(with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True)
+        self.mrg_rxbuf = 1
+        self.in_order = 1
+        self.vectorized = 1
+        self.packed_vq = 0
+        self.server = 1
+        self.start_virtio_testpmd(pmd_session=self.virtio_user0_pmd, dev_mac=self.virtio_dst_mac0, dev_id=0,
+                                  cores=self.vuser0_core_list, prefix='testpmd0', nb_cores=1, used_queues=1)
+        self.mrg_rxbuf = 1
+        self.in_order = 1
+        self.vectorized = 1
+        self.packed_vq = 0
+        self.server = 1
+        self.start_virtio_testpmd(pmd_session=self.virtio_user1_pmd, dev_mac=self.virtio_dst_mac1, dev_id=1,
+                                  cores=self.vuser1_core_list, prefix='testpmd1', nb_cores=1, used_queues=1)
+        before_relaunch_result = self.vm2vm_check_with_two_cbdma()
+        self.logger.info("Relaunch vhost app perf test")
         self.vhost_user.send_expect("^C", "# ", 20)
+        self.start_vhost_app(with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True)
+        after_relaunch_result = self.vm2vm_check_with_two_cbdma(relaunch=True)
 
-        self.table_header = ['Frame Size(Byte)', 'CBDMA Enable/Disable', 'Throughput(Mpps)']
+        self.table_header = ['Frame Size(Byte)', 'Mode', 'Throughput(Mpps)']
         self.result_table_create(self.table_header)
-        for key in cbdma_enable.keys():
-            perf_result.append([key, 'Enable', cbdma_enable[key]])
-        for key in cbdma_disable.keys():
-            perf_result.append([key, 'Disable', cbdma_disable[key]])
+        for key in before_relaunch_result.keys():
+            perf_result.append([key, 'Before Re-launch vhost', before_relaunch_result[key]])
+        for key in after_relaunch_result.keys():
+            perf_result.append([key, 'After Re-launch vhost', after_relaunch_result[key]])
         for table_row in perf_result:
             self.result_table_add(table_row)
         self.result_table_print()
+        self.virtio_user0_pmd.execute_cmd("quit", "# ")
+        self.virtio_user1_pmd.execute_cmd("quit", "# ")
+        self.vhost_user.send_expect("^C", "# ", 20)
 
-        for cbdma_key in cbdma_enable.keys():
-            if cbdma_key == '2000':
-                self.verify(cbdma_enable[cbdma_key] > cbdma_disable[cbdma_key],
-                            "CBDMA Enable Performance {} should better than CBDMA Disable Performance {} when send 2000"
-                            " length packets".format(cbdma_enable[cbdma_key], cbdma_disable[cbdma_key]))
-            elif cbdma_key == '256':
-                self.verify(cbdma_disable[cbdma_key] > cbdma_enable[cbdma_key],
-                            "CBDMA Enable Performance {}  should lower than CBDMA Disable Performance {} when send 256"
-                            " length packets".format(cbdma_enable[cbdma_key], cbdma_disable[cbdma_key]))
-
-    def vm2vm_check_with_two_vhost_device(self, with_cbdma=True, cbdma_num=2, socket_num=2, launch=True):
-        frame_sizes = [256, 2000]
-        if launch:
-            self.start_vhost_app(with_cbdma=with_cbdma, cbdma_num=cbdma_num, socket_num=socket_num, client_mode=False)
-            self.start_vms(mode=0, mergeable=False)
-            self.vm0_pmd = PmdOutput(self.vm_dut[0])
-            self.vm1_pmd = PmdOutput(self.vm_dut[1])
-            self.start_vm_testpmd(self.vm0_pmd)
-            self.start_vm_testpmd(self.vm1_pmd)
-        self.set_testpmd0_param(self.vm0_pmd, self.vm_dst_mac1)
-        self.set_testpmd1_param(self.vm1_pmd, self.vm_dst_mac0)
-
+    def vm2vm_check_with_two_vhost_device(self):
         rx_throughput = {}
-        for frame_size in frame_sizes:
+        self.frame_sizes = [64, 2000, 8000, 'imix']
+        for frame_size in self.frame_sizes:
             self.send_pkts_from_testpmd1(pmd_session=self.vm1_pmd, pkt_len=frame_size)
             # Create test results table
             table_header = ['Frame Size(Byte)', 'Throughput(Mpps)']
@@ -543,52 +587,41 @@ class TestVswitchSampleCBDMA(TestCase):
             self.result_table_print()
         return rx_throughput
 
-    def start_vms_testpmd_and_test(self, launch, quit_vm_testpmd=False):
-        # start vm0 amd vm1 testpmd, send 256 and 2000 length packets from vm1 testpmd
-        perf_result = self.vm2vm_check_with_two_vhost_device(with_cbdma=True, cbdma_num=2, socket_num=2, launch=launch)
-        # stop vm1 and clear vm1 stats
-        self.vm1_pmd.execute_cmd("stop")
-        self.vm1_pmd.execute_cmd("clear port stats all")
-        # stop vm0 and clear vm0 stats
-        self.vm0_pmd.execute_cmd("stop")
-        self.vm0_pmd.execute_cmd("clear port stats all")
-        # only start vm0 and send packets from tester, and check vm0 can receive more then tester send packets' count
-        self.vm0_pmd.execute_cmd("start")
-        self.send_vlan_packet(dts_mac=self.vm_dst_mac0, pkt_size=64, pkt_count=100)
-        time.sleep(3)
-        self.verify_receive_packet(pmd_session=self.vm0_pmd, expected_pkt_count=100)
-        # stop vm0
-        self.vm0_pmd.execute_cmd("stop")
-        # only start vm1 and send packets from tester, and check vm1 can receive more then tester send packets' count
-        self.vm1_pmd.execute_cmd("start")
-        # clear vm1 stats after send start command
-        self.vm1_pmd.execute_cmd("clear port stats all")
-        self.send_vlan_packet(dts_mac=self.vm_dst_mac1, pkt_size=64, pkt_count=100)
-        time.sleep(3)
-        self.verify_receive_packet(pmd_session=self.vm1_pmd, expected_pkt_count=100)
-        if quit_vm_testpmd:
-            self.vm0_pmd.execute_cmd("quit", "#")
-            self.vm1_pmd.execute_cmd("quit", "#")
+    def start_vms_testpmd_and_test(self, need_start_vm=True):
+        if need_start_vm:
+            self.start_vms(mode=2, mergeable=True, server_mode=True)
+            self.vm0_pmd = PmdOutput(self.vm_dut[0])
+            self.vm1_pmd = PmdOutput(self.vm_dut[1])
+        self.start_vm_testpmd(self.vm0_pmd)
+        self.start_vm_testpmd(self.vm1_pmd)
+        self.set_testpmd0_param(self.vm0_pmd, self.vm_dst_mac1)
+        self.set_testpmd1_param(self.vm1_pmd, self.vm_dst_mac0)
+        perf_result = self.vm2vm_check_with_two_vhost_device()
+        self.vm0_pmd.quit()
+        self.vm1_pmd.quit()
         return perf_result
 
-    def test_vm2vm_check_with_two_vhost_device_using_vhost_async_driver(self):
+    def test_vm2vm_test_with_cbdma_channels_register_or_unregister_stable_check(self):
         """
-        Test Case4: VM2VM test with 2 vhost device using vhost async driver
+        Test Case4: VM2VM test with cbdma channels register/unregister stable check
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(2)
-        self.set_async_threshold(256)
-        self.build_vhost_app()
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+
+        self.logger.info("Before rebind perf VM Driver test")
+        self.start_vhost_app(with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True)
+        before_rebind = self.start_vms_testpmd_and_test(need_start_vm=True)
 
-        before_rebind = self.start_vms_testpmd_and_test(launch=True, quit_vm_testpmd=True)
+        self.logger.info("After rebind perf VM Driver test")
+        # repeat bind 50 time from virtio-pci to vfio-pci
+        self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50)
+        self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50)
+        self.vhost_user.send_expect("^C", "# ", 20)
+        self.start_vhost_app(with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True)
+        after_bind = self.start_vms_testpmd_and_test(need_start_vm=False)
         # repeat bind 50 time from virtio-pci to vfio-pci
         self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50)
         self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50)
-        # start vm0 and vm1 testpmd
-        self.start_vm_testpmd(pmd_session=self.vm0_pmd)
-        self.start_vm_testpmd(pmd_session=self.vm1_pmd)
-        after_bind = self.start_vms_testpmd_and_test(launch=False, quit_vm_testpmd=False)
-
         for i in range(len(self.vm)):
             self.vm[i].stop()
         self.vhost_user.send_expect("^C", "# ", 20)
@@ -603,6 +636,152 @@ class TestVswitchSampleCBDMA(TestCase):
             self.result_table_add(table_row)
         self.result_table_print()
 
+    def config_vm_env(self):
+        """
+        set the virtio device IPs and add static ARP entries
+        """
+        vm0_intf = self.vm_dut[0].ports_info[0]['intf']
+        vm1_intf = self.vm_dut[1].ports_info[0]['intf']
+        self.vm_dut[0].send_expect("ifconfig %s %s" % (vm0_intf, self.virtio_ip0), "#", 10)
+        self.vm_dut[1].send_expect("ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10)
+        self.vm_dut[0].send_expect("arp -s %s %s" % (self.virtio_ip1, self.vm_dst_mac1), "#", 10)
+        self.vm_dut[1].send_expect("arp -s %s %s" % (self.virtio_ip0, self.vm_dst_mac0), "#", 10)
+
+    def start_iperf_test(self):
+        """
+        run iperf between the two VMs
+        """
+        iperf_server = "iperf -f g -s -i 1"
+        iperf_client = "iperf -f g -c 1.1.1.2 -i 1 -t 60"
+        self.vm_dut[0].send_expect("%s > iperf_server.log &" % iperf_server, "", 10)
+        self.vm_dut[1].send_expect("%s > iperf_client.log &" % iperf_client, "", 60)
+        time.sleep(90)
+
+    def get_iperf_result(self):
+        """
+        get the iperf test result
+        """
+        self.table_header = ['Mode', '[M|G]bits/sec']
+        self.result_table_create(self.table_header)
+        self.vm_dut[0].send_expect('pkill iperf', '# ')
+        self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir)
+        fp = open("./iperf_client.log")
+        fmsg = fp.read()
+        fp.close()
+        # remove the server report info from msg
+        index = fmsg.find("Server Report")
+        if index != -1:
+            fmsg = fmsg[:index]
+        iperfdata = re.compile('\S*\s*[M|G]bits/sec').findall(fmsg)
+        # the last entry of the iperf output is the average over the whole run
+        self.verify(len(iperfdata) != 0, "The iperf data between the two VMs is 0")
+        self.logger.info("The iperf data between vms is %s" % iperfdata[-1])
+        self.verify((iperfdata[-1].split()[1]) == 'Gbits/sec' and float(iperfdata[-1].split()[0]) >= 1, 'the throughput must be above 1Gbits/sec')
+
+        # put the result to table
+        results_row = ["vm2vm", iperfdata[-1]]
+        self.result_table_add(results_row)
+
+        # print the iperf result
+        self.result_table_print()
+        # remove the iperf log files in the VMs
+        self.vm_dut[0].send_expect('rm iperf_server.log', '#', 10)
+        self.vm_dut[1].send_expect('rm iperf_client.log', '#', 10)
+        return float(iperfdata[-1].split()[0])
+
+    def check_scp_file_valid_between_vms(self, file_size=1024):
+        """
+        scp a file from the first VM to the second and check that the received data is valid
+        """
+        # default file_size=1024K
+        data = ''
+        for char in range(file_size * 1024):
+            data += random.choice(self.random_string)
+        self.vm_dut[0].send_expect('echo "%s" > /tmp/payload' % data, '# ')
+        # scp this file to vm1
+        out = self.vm_dut[1].send_command('scp root@%s:/tmp/payload /root' % self.virtio_ip0, timeout=5)
+        if 'Are you sure you want to continue connecting' in out:
+            self.vm_dut[1].send_command('yes', timeout=3)
+        self.vm_dut[1].send_command(self.vm[0].password, timeout=3)
+        # get the file info in vm1, and check it valid
+        md5_send = self.vm_dut[0].send_expect('md5sum /tmp/payload', '# ')
+        md5_revd = self.vm_dut[1].send_expect('md5sum /root/payload', '# ')
+        md5_send = md5_send[: md5_send.find(' ')]
+        md5_revd = md5_revd[: md5_revd.find(' ')]
+        self.verify(md5_send == md5_revd, 'the received file differs from the sent file')
+
+    def start_iperf_and_scp_test_in_vms(self, need_start_vm=True, mode=0, mergeable=False, server_mode=False):
+        if need_start_vm:
+            self.start_vms(mode=mode, mergeable=mergeable, server_mode=server_mode, set_target=False)
+            self.vm0_pmd = PmdOutput(self.vm_dut[0])
+            self.vm1_pmd = PmdOutput(self.vm_dut[1])
+            self.config_vm_env()
+        self.check_scp_file_valid_between_vms()
+        self.start_iperf_test()
+        iperfdata = self.get_iperf_result()
+        return iperfdata
+
+    def test_vm2vm_split_ring_test_with_iperf_and_reconnect_stable_check(self):
+        """
+        Test Case5: VM2VM split ring test with iperf and reconnect stable check
+        """
+        perf_result = []
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+
+        self.logger.info("launch vhost")
+        self.start_vhost_app(with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True)
+        before_rerun = self.start_iperf_and_scp_test_in_vms(need_start_vm=True, mode=0, mergeable=False, server_mode=True)
+
+        self.logger.info("relaunch vhost")
+        self.vhost_user.send_expect("^C", "# ", 20)
+        self.start_vhost_app(with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True)
+        self.logger.info("rerun scp and iperf test")
+        rerun_test_1 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        rerun_test_2 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        rerun_test_3 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        rerun_test_4 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        rerun_test_5 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+
+        self.table_header = ['Path', 'Before/After rerun scp/iperf', 'Throughput(Gbits/sec)']
+        self.result_table_create(self.table_header)
+        perf_result.append(['split ring', 'Before rerun', before_rerun])
+        perf_result.append(['split ring', 'rerun test 1', rerun_test_1])
+        perf_result.append(['split ring', 'rerun test 2', rerun_test_2])
+        perf_result.append(['split ring', 'rerun test 3', rerun_test_3])
+        perf_result.append(['split ring', 'rerun test 4', rerun_test_4])
+        perf_result.append(['split ring', 'rerun test 5', rerun_test_5])
+        for table_row in perf_result:
+            self.result_table_add(table_row)
+        self.result_table_print()
+
+    def test_vm2vm_packed_ring_test_with_iperf_and_reconnect_stable_check(self):
+        """
+        Test Case6: VM2VM packed ring test with iperf and reconnect stable test
+        """
+        perf_result = []
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+
+        self.start_vhost_app(with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=False)
+        before_rerun = self.start_iperf_and_scp_test_in_vms(need_start_vm=True, mode=2, mergeable=False, server_mode=False)
+
+        self.logger.info("rerun scp and iperf test")
+        rerun_test_1 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        rerun_test_2 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        rerun_test_3 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        rerun_test_4 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        rerun_test_5 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        self.table_header = ['Path', 'Before/After rerun scp/iperf', 'Throughput(Gbits/sec)']
+        self.result_table_create(self.table_header)
+        perf_result.append(['packed ring', 'Before rerun test', before_rerun])
+        perf_result.append(['packed ring', 'rerun test 1', rerun_test_1])
+        perf_result.append(['packed ring', 'rerun test 2', rerun_test_2])
+        perf_result.append(['packed ring', 'rerun test 3', rerun_test_3])
+        perf_result.append(['packed ring', 'rerun test 4', rerun_test_4])
+        perf_result.append(['packed ring', 'rerun test 5', rerun_test_5])
+        for table_row in perf_result:
+            self.result_table_add(table_row)
+        self.result_table_print()
+
     def close_all_session(self):
         if getattr(self, 'vhost_user', None):
             self.dut.close_session(self.vhost_user)
@@ -622,6 +801,5 @@ class TestVswitchSampleCBDMA(TestCase):
         Run after each test suite.
         """
         self.set_max_queues(128)
-        self.set_async_threshold(256)
         self.dut.build_install_dpdk(self.target)
         self.close_all_session()
-- 
1.8.3.1


Thread overview: 4+ messages
2021-07-09 10:52 [dts] [PATCH V1] tests/vswitch_sample_cbdma: modify case and add case Xiang An
2021-07-09 14:07 ` Wang, Yinan
2021-07-12  5:27   ` Tu, Lijuan
2021-07-12  5:49   ` Tu, Lijuan
