From mboxrd@z Thu Jan 1 00:00:00 1970
From: lihong
To: dts@dpdk.org
Cc: lihong
Date: Fri, 28 Feb 2020 06:41:30 +0800
Message-Id: <1582843290-4272-3-git-send-email-lihongx.ma@intel.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1582843290-4272-1-git-send-email-lihongx.ma@intel.com>
References: <1582843290-4272-1-git-send-email-lihongx.ma@intel.com>
Subject: [dts] [PATCH V1 2/2] tests/pvp_multi_paths_vhost_single_core_performance: modify code to support per patch performance
List-Id: test suite reviews and discussions

Signed-off-by: lihong
---
 ...vp_multi_paths_vhost_single_core_performance.py | 228 +++++++++++++++++----
 1 file changed, 193 insertions(+), 35 deletions(-)

diff --git a/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py b/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py
index a8b9e67..4abaa6b 100644
--- a/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py
+++ b/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py
@@ -34,10 +34,15 @@ DPDK Test suite.
 Test PVP vhost single core performance using virtio_user on 8 tx/rx path.
""" +import json +import rst +import os import utils from test_case import TestCase from packet import Packet from pktgen import PacketGeneratorHelper +from settings import UPDATE_EXPECTED, load_global_setting +from copy import deepcopy class TestPVPMultiPathVhostPerformance(TestCase): @@ -62,6 +67,10 @@ class TestPVPMultiPathVhostPerformance(TestCase): self.tester.send_expect('mkdir -p %s' % self.out_path, '# ') # create an instance to set stream field setting self.pktgen_helper = PacketGeneratorHelper() + self.vhost_user = self.dut.new_session(suite="user") + self.vhost = self.dut.new_session(suite="vhost") + self.save_result_flag = True + self.json_obj = {} def set_up(self): """ @@ -69,12 +78,26 @@ class TestPVPMultiPathVhostPerformance(TestCase): """ # Prepare the result table self.table_header = ['Frame'] - self.table_header.append("Mode") + self.table_header.append("Mode/RXD-TXD") self.table_header.append("Mpps") self.table_header.append("% linerate") self.result_table_create(self.table_header) - self.vhost_user = self.dut.new_session(suite="user") - self.vhost = self.dut.new_session(suite="vhost") + + self.test_parameters = self.get_suite_cfg()['test_parameters'] + # test parameters include: frames size, descriptor numbers + self.test_parameters = self.get_suite_cfg()['test_parameters'] + + # traffic duraion in second + self.test_duration = self.get_suite_cfg()['test_duration'] + + # initilize throughput attribution + # {'$framesize':{"$nb_desc": 'throughput'} + self.throughput = {} + + # Accepted tolerance in Mpps + self.gap = self.get_suite_cfg()['accepted_tolerance'] + self.test_result = {} + self.nb_desc = self.test_parameters[64][0] def send_and_verify(self, case_info): """ @@ -82,33 +105,36 @@ class TestPVPMultiPathVhostPerformance(TestCase): """ for frame_size in self.frame_sizes: tgen_input = [] - for port in range(self.number_of_ports): - rx_port = self.tester.get_local_port( - self.dut_ports[port % self.number_of_ports]) - tx_port = self.tester.get_local_port( - self.dut_ports[(port) % self.number_of_ports]) - destination_mac = self.dut.get_mac_address( - self.dut_ports[(port) % self.number_of_ports]) - - pkt = Packet(pkt_type='UDP', pkt_len=frame_size) - pkt.config_layer('ether', {'dst': '%s' % destination_mac}) - pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port)) - tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port))) + self.throughput[frame_size] = dict() + self.logger.info("Test running at parameters: " + + "framesize: {}, rxd/txd: {}".format(frame_size, self.nb_desc)) + rx_port = self.tester.get_local_port( + self.dut_ports[0]) + tx_port = self.tester.get_local_port( + self.dut_ports[0]) + destination_mac = self.dut.get_mac_address( + self.dut_ports[0]) + + pkt = Packet(pkt_type='UDP', pkt_len=frame_size) + pkt.config_layer('ether', {'dst': '%s' % destination_mac}) + pkt.save_pcapfile(self.tester, "%s/multi_path.pcap" % (self.out_path)) + tgen_input.append((tx_port, rx_port, "%s/multi_path.pcap" % (self.out_path))) self.tester.pktgen.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen) traffic_opt = {'delay': 5} _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt) Mpps = pps / 1000000.0 - self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size)) + self.verify(Mpps > 0.0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size)) - throughput = 
Mpps * 100 / \ + linerate = Mpps * 100 / \ float(self.wirespeed(self.nic, frame_size, self.number_of_ports)) + self.throughput[frame_size][self.nb_desc] = Mpps results_row = [frame_size] results_row.append(case_info) results_row.append(Mpps) - results_row.append(throughput) + results_row.append(linerate) self.result_table_add(results_row) @property @@ -130,7 +156,8 @@ class TestPVPMultiPathVhostPerformance(TestCase): vdevs=['net_vhost0,iface=vhost-net,queues=1']) if self.check_2M_env: eal_param += " --single-file-segments" - command_line_client = "./%s/app/testpmd " % self.target + eal_param + " -- -i --nb-cores=1 --txd=1024 --rxd=1024" + command_line_client = "./%s/app/testpmd " % self.target + eal_param + \ + " -- -i --nb-cores=1 --txd=%d --rxd=%d" % (self.nb_desc, self.nb_desc) self.vhost.send_expect(command_line_client, "testpmd> ", 120) self.vhost.send_expect("set fwd mac", "testpmd> ", 120) self.vhost.send_expect("start", "testpmd> ", 120) @@ -140,16 +167,114 @@ class TestPVPMultiPathVhostPerformance(TestCase): start testpmd on virtio """ eal_param = self.dut.create_eal_parameters(cores=self.core_list_user, prefix='virtio', + ports=[0], vdevs=['virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,queues=1,%s' % args["version"]]) if self.check_2M_env: eal_param += " --single-file-segments" - command_line_user = "./%s/app/testpmd " % self.target + eal_param + " -- -i %s --nb-cores=2 --txd=1024 --rxd=1024" % \ - args["path"] + command_line_user = "./%s/app/testpmd " % self.target + eal_param + \ + " -- -i %s --nb-cores=2 --txd=%d --rxd=%d" % \ + (args["path"], self.nb_desc, self.nb_desc) self.vhost_user.send_expect(command_line_user, "testpmd> ", 120) self.vhost_user.send_expect("set fwd io", "testpmd> ", 120) self.vhost_user.send_expect("start", "testpmd> ", 120) + def handle_expected(self): + """ + Update expected numbers to configurate file: conf/$suite_name.cfg + """ + if load_global_setting(UPDATE_EXPECTED) == "yes": + for frame_size in self.test_parameters.keys(): + for nb_desc in self.test_parameters[frame_size]: + self.expected_throughput[frame_size][nb_desc] = round(self.throughput[frame_size][nb_desc], 3) + + def handle_results(self): + """ + results handled process: + 1, save to self.test_results + 2, create test results table + 3, save to json file for Open Lab + """ + header = self.table_header + header.append("Expected Throughput") + header.append("Throughput Difference") + for frame_size in self.test_parameters.keys(): + wirespeed = self.wirespeed(self.nic, frame_size, self.number_of_ports) + ret_datas = {} + for nb_desc in self.test_parameters[frame_size]: + ret_data = {} + ret_data[header[0]] = frame_size + ret_data[header[1]] = nb_desc + ret_data[header[2]] = "{:.3f} Mpps".format( + self.throughput[frame_size][nb_desc]) + ret_data[header[3]] = "{:.3f}%".format( + self.throughput[frame_size][nb_desc] * 100 / wirespeed) + ret_data[header[4]] = "{:.3f} Mpps".format( + self.expected_throughput[frame_size][nb_desc]) + ret_data[header[5]] = "{:.3f} Mpps".format( + self.throughput[frame_size][nb_desc] - + self.expected_throughput[frame_size][nb_desc]) + ret_datas[nb_desc] = deepcopy(ret_data) + self.test_result[frame_size] = deepcopy(ret_datas) + # Create test results table + self.result_table_create(header) + for frame_size in self.test_parameters.keys(): + for nb_desc in self.test_parameters[frame_size]: + table_row = list() + for i in range(len(header)): + table_row.append( + self.test_result[frame_size][nb_desc][header[i]]) + self.result_table_add(table_row) + # present 
test results to screen + self.result_table_print() + # save test results as a file + if self.save_result_flag: + self.save_result(self.test_result) + + def save_result(self, data): + ''' + Saves the test results as a separated file named with + self.nic+_perf_virtio_user_pvp.json in output folder + if self.save_result_flag is True + ''' + case_name = self.running_case + self.json_obj[case_name] = list() + status_result = [] + for frame_size in self.test_parameters.keys(): + for nb_desc in self.test_parameters[frame_size]: + row_in = self.test_result[frame_size][nb_desc] + row_dict0 = dict() + row_dict0['performance'] = list() + row_dict0['parameters'] = list() + row_dict0['parameters'] = list() + result_throughput = float(row_in['Mpps'].split()[0]) + expected_throughput = float(row_in['Expected Throughput'].split()[0]) + # delta value and accepted tolerance in percentage + delta = result_throughput - expected_throughput + gap = expected_throughput * -self.gap * 0.01 + delta = float(delta) + gap = float(gap) + self.logger.info("Accept tolerance are (Mpps) %f" % gap) + self.logger.info("Throughput Difference are (Mpps) %f" % delta) + if result_throughput > expected_throughput + gap: + row_dict0['status'] = 'PASS' + else: + row_dict0['status'] = 'FAIL' + self.verify(row_dict0['status'] == 'PASS', 'The throughput is not in correct range') + row_dict1 = dict(name="Throughput", value=result_throughput, unit="Mpps", delta=delta) + row_dict2 = dict(name="Txd/Rxd", value=row_in["Mode/RXD-TXD"], unit="descriptor") + row_dict3 = dict(name="frame_size", value=row_in["Frame"], unit="bytes") + row_dict0['performance'].append(row_dict1) + row_dict0['parameters'].append(row_dict2) + row_dict0['parameters'].append(row_dict3) + self.json_obj[case_name].append(row_dict0) + status_result.append(row_dict0['status']) + with open(os.path.join(rst.path2Result, + '{0:s}_{1}.json'.format( + self.nic, self.suite_name)), 'w') as fp: + json.dump(self.json_obj, fp) + self.verify("Fail" not in status_result, "Exceeded Gap") + def close_all_testpmd(self): """ close all testpmd of vhost and virtio @@ -168,130 +293,163 @@ class TestPVPMultiPathVhostPerformance(TestCase): """ performance for Vhost PVP virtio 1.1 Mergeable Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("virtio_1.1_mergeable on") self.close_all_testpmd() - - self.close_all_session() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_vhost_single_core_virtio11_normal(self): """ performance for Vhost PVP virtio1.1 Normal Path. 
""" + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("virtio_1.1 normal") self.close_all_testpmd() - self.close_all_session() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_vhost_single_core_virtio11_inorder_mergeable(self): """ performance for Vhost PVP virtio 1.1 inorder Mergeable Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "in_order=1,packed_vq=1,mrg_rxbuf=1", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("virtio_1.1_inorder_mergeable on") self.close_all_testpmd() - - self.close_all_session() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_vhost_single_core_virtio11_inorder_normal(self): """ performance for Vhost PVP virtio1.1 inorder Normal Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "in_order=1,packed_vq=1,mrg_rxbuf=0", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("virtio_1.1 inorder normal") self.close_all_testpmd() - self.close_all_session() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_vhost_single_core_inorder_mergeable(self): """ performance for Vhost PVP In_order mergeable Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("inoder mergeable on") self.close_all_testpmd() - self.close_all_session() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_vhost_single_core_inorder_no_mergeable(self): """ performance for Vhost PVP In_order no_mergeable Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("inoder mergeable off") self.close_all_testpmd() - self.close_all_session() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_vhost_single_core_mergeable(self): """ performance for Vhost PVP Mergeable Path. 
""" + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("mergeable on") self.close_all_testpmd() - self.close_all_session() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_vhost_single_core_normal(self): """ performance for Vhost PVP Normal Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("normal") self.close_all_testpmd() - self.close_all_session() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_vhost_single_core_vector_rx(self): """ performance for Vhost PVP Vector_RX Path """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0", "path": "--tx-offloads=0x0"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("vector rx") self.close_all_testpmd() - self.close_all_session() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def tear_down(self): """ Run after each test case. """ - self.dut.send_expect("killall -s INT testpmd", "#") - self.close_all_session() + self.dut.kill_all() def tear_down_all(self): """ Run after each test suite. """ - pass + self.close_all_session() -- 2.7.4