[dts] [PATCH V2] tests/vhost_cbdma: Optimize Case1-2 and Add Case3 to check CBDMA threshold value
From: JiangYu @ 2020-12-30 15:58 UTC
  To: dts; +Cc: JiangYu

V1:
0, Add expected-value JSON config file and related result-handling functions (see the sketch below).
1, Optimize Case 1: add a relaunch-vhost performance check.
2, Optimize the self.send_and_verify function.
3, Add new Case 3: test_check_threshold_value_with_cbdma.

V2: Optimize the Case 3 check point.
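
For reference, the pass/fail logic that the new save_result() applies against the
expected values boils down to the following. This is a minimal standalone sketch,
not the suite code itself; the function name and the example numbers are
illustrative only:

    def check_throughput(result_mpps, expected_mpps, accepted_tolerance=2):
        # accepted_tolerance is a percentage of the expected value (2 -> 2%)
        gap = expected_mpps * -accepted_tolerance * 0.01   # negative margin, in Mpps
        delta = result_mpps - expected_mpps                # measured difference, in Mpps
        status = "PASS" if result_mpps > expected_mpps + gap else "FAIL"
        return status, delta

    # e.g. expected 7.537 Mpps with a 2% tolerance: anything above ~7.386 Mpps passes
    check_throughput(7.45, 7.537)   # -> ('PASS', ~-0.087)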

Signed-off-by: JiangYu <yux.jiang@intel.com>
---
 conf/vhost_cbdma.cfg           |   7 +
 tests/TestSuite_vhost_cbdma.py | 326 +++++++++++++++++++++++++++++++++++++----
 2 files changed, 302 insertions(+), 31 deletions(-)
 create mode 100644 conf/vhost_cbdma.cfg

diff --git a/conf/vhost_cbdma.cfg b/conf/vhost_cbdma.cfg
new file mode 100644
index 0000000..2777d79
--- /dev/null
+++ b/conf/vhost_cbdma.cfg
@@ -0,0 +1,7 @@
+[suite]
+update_expected = True
+test_parameters = {64: [1024], 1518: [1024]}
+test_duration = 60
+accepted_tolerance = 2
+expected_throughput = {'test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations': {'dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'virtio_user_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vhost_userRelaunch_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_vhost_2_cbdma': {64: {1024: 0.00}, 1518: {1024: 0.00}}}, 'test_perf_pvp_spilt_all_path_with_cbdma_vhost_enqueue_operations': {'inorder_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}},}
+
diff --git a/tests/TestSuite_vhost_cbdma.py b/tests/TestSuite_vhost_cbdma.py
index 869765f..9b9d53d 100644
--- a/tests/TestSuite_vhost_cbdma.py
+++ b/tests/TestSuite_vhost_cbdma.py
@@ -42,12 +42,16 @@ Here is an example:
  $ ./testpmd -c f -n 4 \
    --vdev 'net_vhost0,iface=/tmp/s0,queues=1,dmas=[txq0@80:04.0],dmathr=1024'
 """
+import os, rst, json
 import re
 import time
 from test_case import TestCase
 from settings import HEADER_SIZE
+from packet import Packet
 from pktgen import PacketGeneratorHelper
 from pmd_output import PmdOutput
+from settings import UPDATE_EXPECTED, load_global_setting
+from copy import deepcopy
 
 
 class TestVirTioVhostCbdma(TestCase):
@@ -58,8 +62,10 @@ class TestVirTioVhostCbdma(TestCase):
 
         self.vhost_user = self.dut.new_session(suite="vhost-user")
         self.virtio_user = self.dut.new_session(suite="virtio-user")
+        self.virtio_user1 = self.dut.new_session(suite="virtio-user1")
         self.pmdout_vhost_user = PmdOutput(self.dut, self.vhost_user)
         self.pmdout_virtio_user = PmdOutput(self.dut, self.virtio_user)
+        self.pmdout_virtio_user1 = PmdOutput(self.dut, self.virtio_user1)
         self.frame_sizes = [64, 1518]
         self.virtio_mac = "00:01:02:03:04:05"
         self.headers_size = HEADER_SIZE['eth'] + HEADER_SIZE['ip']
@@ -77,20 +83,40 @@ class TestVirTioVhostCbdma(TestCase):
             self.tester.send_expect('mkdir -p %s' % self.out_path, '# ')
         self.pktgen_helper = PacketGeneratorHelper()
         self.base_dir = self.dut.base_dir.replace('~', '/root')
+        self.testpmd_name = self.dut.apps_name['test-pmd'].split("/")[-1]
+        self.save_result_flag = True
+        self.json_obj = {}
 
     def set_up(self):
         """
         Run before each test case.
         """
         self.table_header = ['Frame']
+        self.table_header.append("Mode/RXD-TXD")
         self.used_cbdma = []
-        self.table_header.append("Mode")
         self.table_header.append("Mpps")
         self.table_header.append("% linerate")
         self.result_table_create(self.table_header)
-        self.vhost = self.dut.new_session(suite="vhost-user")
+
+        # test parameters include: frame sizes, descriptor numbers
+        self.test_parameters = self.get_suite_cfg()['test_parameters']
+
+        # traffic duration in seconds
+        self.test_duration = self.get_suite_cfg()['test_duration']
+
+        # initialize throughput attribute
+        # {'TestCase': {'Mode': {'$framesize': {'$nb_desc': 'throughput'}}}}
+        self.throughput = {}
+
+        # Accepted tolerance in Mpps
+        self.gap = self.get_suite_cfg()['accepted_tolerance']
+        self.test_result = {}
+        self.nb_desc = self.test_parameters[64][0]
+
+        self.dut.send_expect("killall -I %s" % self.testpmd_name, '#', 20)
         self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
         self.dut.send_expect("rm -rf /tmp/s0", "#")
+        self.mode_list = []
 
     def bind_nic_driver(self, ports, driver=""):
         if driver == "igb_uio":
@@ -143,6 +169,33 @@ class TestVirTioVhostCbdma(TestCase):
             self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=ioatdma  %s' % self.device_str,
                                  '# ', 60)
 
+    def check_packets_of_each_queue(self, queue_list):
+        """
+        check that each queue has received packets
+        """
+        out = self.vhost_user.send_expect("stop", "testpmd> ", 60)
+        for queue_index in queue_list:
+            queue = "Queue= %d" % queue_index
+            index = out.find(queue)
+            rx = re.search("RX-packets:\s*(\d*)", out[index:])
+            tx = re.search("TX-packets:\s*(\d*)", out[index:])
+            rx_packets = int(rx.group(1))
+            tx_packets = int(tx.group(1))
+            self.verify(rx_packets > 0 and tx_packets > 0,
+                        "The queue %d rx-packets or tx-packets is 0 about " %
+                        queue_index + \
+                        "rx-packets:%d, tx-packets:%d" %
+                        (rx_packets, tx_packets))
+
+        self.vhost_user.send_expect("clear port stats all", "testpmd> ", 30)
+        self.vhost_user.send_expect("start", "testpmd> ", 30)
+
+    def check_port_stats_result(self, session):
+        out = session.send_expect("show port stats all", "testpmd> ", 30)
+        self.result_first = re.findall(r'RX-packets: (\w+)', out)
+        self.result_secondary = re.findall(r'TX-packets: (\w+)', out)
+        self.verify(int(self.result_first[0]) > 1 and int(self.result_secondary[0]) > 1, "packets are not forwarded correctly")
+
     @property
     def check_2m_env(self):
         out = self.dut.send_expect("cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# ")
@@ -156,6 +209,16 @@ class TestVirTioVhostCbdma(TestCase):
         self.vhost_user.send_expect('set fwd mac', 'testpmd> ', 120)
         self.vhost_user.send_expect('start', 'testpmd> ', 120)
 
+    def launch_testpmd_as_virtio_user1(self, command, cores="Default", dev=""):
+        eal_params = ""
+        if self.check_2m_env:
+            eal_params += " --single-file-segments"
+        self.pmdout_virtio_user1.start_testpmd(cores, command, vdevs=[dev], ports=[], no_pci=True,
+                                               prefix="virtio1", fixed_prefix=True, eal_param=eal_params)
+        self.virtio_user1.send_expect('set fwd mac', 'testpmd> ', 30)
+        self.virtio_user1.send_expect('start', 'testpmd> ', 30)
+        self.virtio_user1.send_expect('show port info all', 'testpmd> ', 30)
+
     def launch_testpmd_as_virtio_user(self, command, cores="Default", dev=""):
         eal_params = ""
         if self.check_2m_env:
@@ -165,11 +228,11 @@ class TestVirTioVhostCbdma(TestCase):
 
         self.virtio_user.send_expect('set fwd mac', 'testpmd> ', 120)
         self.virtio_user.send_expect('start', 'testpmd> ', 120)
+        self.virtio_user.send_expect('show port info all', 'testpmd> ', 30)
 
-    def diff_param_launch_send_and_verify(self, mode, params, dev, cores, is_quit=True):
-        self.launch_testpmd_as_virtio_user(params,
-                                           cores,
-                                           dev=dev)
+    def diff_param_launch_send_and_verify(self, mode, params, dev, cores, is_quit=True, launch_virtio=True):
+        if launch_virtio:
+            self.launch_testpmd_as_virtio_user(params, cores, dev=dev)
 
         self.send_and_verify(mode)
         if is_quit:
@@ -181,6 +244,9 @@ class TestVirTioVhostCbdma(TestCase):
         used one cbdma port  bonding igb_uio
         :return:
         """
+        self.test_target = self.running_case
+        self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
+
         txd_rxd = 1024
         dmathr = 1024
         eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
@@ -207,16 +273,34 @@ class TestVirTioVhostCbdma(TestCase):
                     queue, txd_rxd, txd_rxd)
             vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
             self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs,
-                                                   self.cores[2:4],
+                                                   self.cores[2:4], is_quit=False,
                                                    )
+            self.mode_list.append(key)
+            # step3 restart vhost port, then check throughput again
+            key += "_RestartVhost"
+            self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
+            self.vhost_user.send_expect('stop', 'testpmd> ', 10)
+            self.vhost_user.send_expect('start', 'testpmd> ', 10)
+            self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)
+            self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
+            self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs,
+                                                   self.cores[2:4], launch_virtio=False)
+            self.mode_list.append(key)
+
+        self.vhost_user.send_expect("quit", "# ")
         self.result_table_print()
+        self.handle_expected(mode_list=self.mode_list)
+        self.handle_results(mode_list=self.mode_list)
 
     def test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations(self):
         """
         # used 2 cbdma ports  bonding igb_uio
         :return:
         """
-        used_cbdma_num = 2
+        self.test_target = self.running_case
+        self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
+
+        used_cbdma_num = 4
         queue = 2
         txd_rxd = 1024
         dmathr = 1024
@@ -237,50 +321,105 @@ class TestVirTioVhostCbdma(TestCase):
         self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2],
                                           dev=vhost_dev % vhost_dmas)
         #
-        #  queue 2 start virtio testpmd,virtio queue 2 to 1
+        #  queue 2 start virtio testpmd, check performance and RX/TX
         mode = "dynamic_queue2"
+        self.mode_list.append(mode)
         self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params,
                                            self.cores[2:4],
                                            dev=virtio_dev)
-        self.send_and_verify(mode)
+        self.send_and_verify(mode, queue_list=range(queue))
+
+        # On virtio-user side, dynamically change RX/TX queue number from 2 queues to 1 queue
         self.vhost_or_virtio_set_one_queue(self.virtio_user)
-        self.send_and_verify("virtio_user_" + mode + "_change_to_1", multiple_queue=False)
 
+        self.send_and_verify("virtio_user_" + mode + "_change_to_1", queue_list=[0])
+        self.mode_list.append("virtio_user_" + mode + "_change_to_1")
         self.virtio_user.send_expect("stop", "testpmd> ")
         self.virtio_user.send_expect("quit", "# ")
         time.sleep(5)
         self.dut.send_expect(f"rm -rf {virtio_path}", "#")
-        # queue 2 start virtio testpmd,vhost queue 2 to 1
+
+        # queue 2 start virtio testpmd, check performance and RX/TX
         self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params,
                                            self.cores[2:4],
                                            dev=virtio_dev)
         mode = "Relaunch_dynamic_queue2"
-        self.send_and_verify(mode)
+        self.mode_list.append(mode)
+        self.send_and_verify(mode, queue_list=range(queue))
+
+        # On vhost side, dynamically change RX queue number from 2 queues to 1 queue
         self.vhost_or_virtio_set_one_queue(self.vhost_user)
         self.send_and_verify("vhost_user" + mode + "_change_to_1")
+        self.mode_list.append("vhost_user" + mode + "_change_to_1")
         self.vhost_user.send_expect("quit", "# ")
         time.sleep(2)
 
         # Relaunch vhost with another two cbdma channels
         mode = "Relaunch_vhost_2_cbdma"
+        self.mode_list.append(mode)
         dmathr = 512
-        vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]}],dmathr={dmathr}"
+        vhost_dmas = f"dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr}"
         self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2],
                                           dev=vhost_dev % vhost_dmas)
-        self.send_and_verify(mode)
+        self.virtio_user.send_expect("clear port stats all", "testpmd> ", 30)
+        self.send_and_verify(mode, queue_list=range(queue))
+        self.check_port_stats_result(self.virtio_user)
         self.virtio_user.send_expect("quit", "# ")
         self.vhost_user.send_expect("quit", "# ")
-        time.sleep(2)
+
         self.result_table_print()
+        # result_rows = [[], [64, 'dynamic_queue2', 7.4959375, 12.593175], [1518, 'dynamic_queue2', 1.91900225, 59.028509209999996]]
+        result_rows = self.result_table_getrows()
+        self.handle_expected(mode_list=self.mode_list)
+        self.handle_results(mode_list=self.mode_list)
+
+    def test_check_threshold_value_with_cbdma(self):
+        """
+        Test Case3: CBDMA threshold value check
+        """
+        used_cbdma_num = 4
+        params = '--nb-cores=1 --rxq=2 --txq=2'
+        dmathr = [512, 4096]
+        vid_dict = {}
+        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+        self.dut.restore_interfaces()
+        # launch vhost, check that the cbdma threshold value of each vhost port can be configured correctly from the vhost log
+        vhost_vdev = [f"'eth_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr[0]}'", \
+            f"'eth_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr[1]}'"]
+        virtio_dev0 = f"net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=0,queue_size=4096"
+        virtio_dev1 = f"net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=0,queue_size=4096"
+        vdev_params = '{} --vdev {}'.format(vhost_vdev[0], vhost_vdev[1])
+        self.pmdout_vhost_user.start_testpmd(cores=self.cores[0:2], param=params, vdevs=[vdev_params], ports=[], prefix="vhost", fixed_prefix=True)
+        self.vhost_user.send_expect('start', 'testpmd> ', 120)
+        # vid0,qid0,dma2,threshold:4096
+        self.launch_testpmd_as_virtio_user1(params, self.cores[2:4], dev=virtio_dev1)
+        vid_dict[dmathr[1]] = 0
+        # vid1,qid0,dma0,threshold:512
+        self.launch_testpmd_as_virtio_user(params, self.cores[4:6], dev=virtio_dev0)
+        vid_dict[dmathr[0]] = 1
+        # Check from the vhost log that the cbdma threshold value of each vhost port is configured correctly
+        out = self.vhost_user.get_session_before(timeout=2)
+        self.vhost_user.send_expect("quit", "# ")
+        self.virtio_user.send_expect("quit", "# ")
+        self.virtio_user1.send_expect("quit", "# ")
+        pattern = re.compile(r'dma parameters: vid\S+,qid\d+,dma\d+,threshold:\d+')
+        return_param = re.findall(pattern, out)
+        self.logger.info("Actual Info:" + str(return_param))
+        check_value = 0
+        for dma in dmathr:
+            check_value += len(re.findall('vid{},\S+threshold:{}'.format(vid_dict[dma], dma), str(return_param)))
+        self.verify(check_value == used_cbdma_num, "Check failed: Actual value:{}".format(return_param))
 
     @staticmethod
     def vhost_or_virtio_set_one_queue(session):
-        session.send_expect('start', 'testpmd> ', 120)
         session.send_expect('stop', 'testpmd> ', 120)
         session.send_expect('port stop all', 'testpmd> ', 120)
         session.send_expect('port config all rxq 1', 'testpmd> ', 120)
+        session.send_expect('port config all txq 1', 'testpmd> ', 120)
         session.send_expect('port start all', 'testpmd> ', 120)
         session.send_expect('start', 'testpmd> ', 120)
+        session.send_expect('show port info all', 'testpmd> ', 30)
+        session.send_expect('show port stats all', 'testpmd> ', 120)
         time.sleep(5)
 
     @property
@@ -292,47 +431,171 @@ class TestVirTioVhostCbdma(TestCase):
             check_dict[size] = round(speed * linerate[size], 2)
         return check_dict
 
-    def send_and_verify(self, mode, multiple_queue=True):
+    def send_and_verify(self, mode, multiple_queue=True, queue_list=[]):
         """
         Send packet with packet generator and verify
         """
+        self.throughput[mode] = dict()
         for frame_size in self.frame_sizes:
+            self.throughput[mode][frame_size] = dict()
+
             payload_size = frame_size - self.headers_size
-            tgen_input = []
-            rx_port = self.tester.get_local_port(self.dut_ports[0])
-            tx_port = self.tester.get_local_port(self.dut_ports[0])
-            ip_src = 'src=RandIP()'
+            tgenInput = []
+            port = self.tester.get_local_port(self.dut_ports[0])
+
+            fields_config = {'ip': {'src': {'action': 'random'},},}
             if not multiple_queue:
-                ip_src = ""
+                fields_config = None
+
+            pkt1 = Packet()
+            pkt1.assign_layers(['ether', 'ipv4', 'raw'])
+            pkt1.config_layers([('ether', {'dst': '%s' % self.virtio_mac}), ('ipv4', {'src': '1.1.1.1'}),
+                                ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
 
-            pacp = 'wrpcap("%s/vhost.pcap", [Ether(dst="%s")/IP(%s)/("X"*%d)])' \
-                   % (self.out_path, self.virtio_mac, ip_src, payload_size)
-            self.tester.scapy_append(pacp)
-            tgen_input.append((tx_port, rx_port, "%s/vhost.pcap" % self.out_path))
+            pkt1.save_pcapfile(self.tester, "%s/multiqueuerandomip_%s.pcap" % (self.out_path, frame_size))
+            tgenInput.append((port, port, "%s/multiqueuerandomip_%s.pcap" % (self.out_path, frame_size)))
 
-            self.tester.scapy_execute()
             self.tester.pktgen.clear_streams()
-            streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100,
-                                                                     None, self.tester.pktgen)
+            streams = self.pktgen_helper.prepare_stream_from_tginput(tgenInput, 100, fields_config, self.tester.pktgen)
             trans_options = {'delay': 5, 'duration': 20}
             _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=trans_options)
+
             Mpps = pps / 1000000.0
             self.verify(Mpps > 0,
                         "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
             throughput = Mpps * 100 / \
                          float(self.wirespeed(self.nic, frame_size, 1))
 
+            self.throughput[mode][frame_size][self.nb_desc] = Mpps
+
             results_row = [frame_size]
             results_row.append(mode)
             results_row.append(Mpps)
             results_row.append(throughput)
             self.result_table_add(results_row)
 
+        if queue_list:
+            # check RX/TX can work normally in each queues
+            self.check_packets_of_each_queue(queue_list=queue_list)
+
+    def handle_expected(self, mode_list):
+        """
+        Update expected values in the configuration file: conf/$suite_name.cfg
+        """
+        if load_global_setting(UPDATE_EXPECTED) == "yes":
+            for mode in mode_list:
+                for frame_size in self.test_parameters.keys():
+                    for nb_desc in self.test_parameters[frame_size]:
+                        self.expected_throughput[mode][frame_size][nb_desc] = round(
+                            self.throughput[mode][frame_size][nb_desc], 3)
+
+    def handle_results(self, mode_list):
+        """
+        results handled process:
+        1, save to self.test_results
+            table_header = ['Frame', 'Mode/RXD-TXD', 'Mpps', '% linerate', 'nb_desc', 'Expected Throughput', 'Throughput Difference']
+            ret_datas = {1024: {'Frame': 64, 'Mode/RXD-TXD': 'dynamic_queue2', 'Mpps': '7.537 Mpps', '% linerate': '12.662%',
+                        'nb_desc': 1024, 'Expected Throughput': '7.537 Mpps', 'Throughput Difference': '-0.000 Mpps'}}
+            self.test_result = {'dynamic_queue2': {64: {1024: {'Frame': 64, 'Mode/RXD-TXD': 'dynamic_queue2', 'Mpps': '7.537 Mpps',
+                '% linerate': '12.662%', 'nb_desc': 1024, 'Expected Throughput': '7.537 Mpps', 'Throughput Difference': '-0.000 Mpps'}}}}
+        2, create test results table
+        3, save to json file for Open Lab
+        """
+        header = self.table_header
+        header.append("nb_desc")
+        header.append("Expected Throughput")
+        header.append("Throughput Difference")
+
+        for mode in mode_list:
+            self.test_result[mode] = dict()
+            for frame_size in self.test_parameters.keys():
+                wirespeed = self.wirespeed(self.nic, frame_size, self.number_of_ports)
+                ret_datas = {}
+                for nb_desc in self.test_parameters[frame_size]:
+                    ret_data = {}
+                    ret_data[header[0]] = frame_size
+                    ret_data[header[1]] = mode
+                    ret_data[header[2]] = "{:.3f} Mpps".format(
+                        self.throughput[mode][frame_size][nb_desc])
+                    ret_data[header[3]] = "{:.3f}%".format(
+                        self.throughput[mode][frame_size][nb_desc] * 100 / wirespeed)
+                    ret_data[header[4]] = nb_desc
+                    ret_data[header[5]] = "{:.3f} Mpps".format(
+                        self.expected_throughput[mode][frame_size][nb_desc])
+                    ret_data[header[6]] = "{:.3f} Mpps".format(
+                        self.throughput[mode][frame_size][nb_desc] -
+                        self.expected_throughput[mode][frame_size][nb_desc])
+                    ret_datas[nb_desc] = deepcopy(ret_data)
+                self.test_result[mode][frame_size] = deepcopy(ret_datas)
+        # Create test results table
+        self.result_table_create(header)
+        for mode in mode_list:
+            for frame_size in self.test_parameters.keys():
+                for nb_desc in self.test_parameters[frame_size]:
+                    table_row = list()
+                    for i in range(len(header)):
+                        table_row.append(
+                            self.test_result[mode][frame_size][nb_desc][header[i]])
+                    self.result_table_add(table_row)
+
+        # present test results to screen
+        self.result_table_print()
+        # save test results as a file
+        if self.save_result_flag:
+            self.save_result(self.test_result, mode_list)
+
+    def save_result(self, data, mode_list):
+        """
+        Save the test results to a separate JSON file named
+        {self.nic}_{self.suite_name}.json in the output folder
+        if self.save_result_flag is True
+        """
+        case_name = self.running_case
+        self.json_obj[case_name] = list()
+        status_result = []
+
+        for mode in mode_list:
+            for frame_size in self.test_parameters.keys():
+                for nb_desc in self.test_parameters[frame_size]:
+                    row_in = self.test_result[mode][frame_size][nb_desc]
+                    row_dict0 = dict()
+                    row_dict0['performance'] = list()
+                    row_dict0['parameters'] = list()
+                    row_dict0['parameters'] = list()
+                    result_throughput = float(row_in['Mpps'].split()[0])
+                    expected_throughput = float(row_in['Expected Throughput'].split()[0])
+                    # delta in Mpps; accepted tolerance converted from percent of expected value to Mpps
+                    delta = result_throughput - expected_throughput
+                    gap = expected_throughput * -self.gap * 0.01
+                    delta = float(delta)
+                    gap = float(gap)
+                    self.logger.info("Accept tolerance are (Mpps) %f" % gap)
+                    self.logger.info("Throughput Difference are (Mpps) %f" % delta)
+                    if result_throughput > expected_throughput + gap:
+                        row_dict0['status'] = 'PASS'
+                    else:
+                        row_dict0['status'] = 'FAIL'
+                    row_dict1 = dict(name="Throughput", value=result_throughput, unit="Mpps", delta=delta)
+                    row_dict2 = dict(name="Txd/Rxd", value=row_in["Mode/RXD-TXD"], unit="descriptor")
+                    row_dict3 = dict(name="frame_size", value=row_in["Frame"], unit="bytes")
+                    row_dict0['performance'].append(row_dict1)
+                    row_dict0['parameters'].append(row_dict2)
+                    row_dict0['parameters'].append(row_dict3)
+                    self.json_obj[case_name].append(row_dict0)
+                    status_result.append(row_dict0['status'])
+
+        with open(os.path.join(rst.path2Result,
+                               '{0:s}_{1}.json'.format(
+                                   self.nic, self.suite_name)), 'w') as fp:
+            json.dump(self.json_obj, fp)
+        self.verify("FAIL" not in status_result, "Exceeded Gap")
+
     def tear_down(self):
         """
         Run after each test case.
         Clear qemu and testpmd to avoid blocking the following TCs
         """
+        self.dut.send_expect("killall -I %s" % self.testpmd_name, '#', 20)
         self.bind_cbdma_device_to_kernel()
         self.bind_nic_driver(self.dut_ports)
 
@@ -344,4 +607,5 @@ class TestVirTioVhostCbdma(TestCase):
         self.bind_nic_driver(self.dut_ports, self.drivername)
         self.dut.close_session(self.vhost_user)
         self.dut.close_session(self.virtio_user)
-
+        self.dut.close_session(self.virtio_user1)
+        self.dut.kill_all()
-- 
2.7.4

