From: lihong
To: dts@dpdk.org
Cc: lihong
Date: Tue, 3 Mar 2020 09:31:01 +0800
Message-Id: <1583199061-12136-3-git-send-email-lihongx.ma@intel.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1583199061-12136-1-git-send-email-lihongx.ma@intel.com>
References: <1583199061-12136-1-git-send-email-lihongx.ma@intel.com>
Subject: [dts] [PATCH V2 2/2] tests/pvp_multi_paths_performance: modify code to support per patch performance

Signed-off-by: lihong
---
 tests/TestSuite_pvp_multi_paths_performance.py | 217 ++++++++++++++++++++++---
 1 file changed, 192 insertions(+), 25 deletions(-)

diff --git a/tests/TestSuite_pvp_multi_paths_performance.py b/tests/TestSuite_pvp_multi_paths_performance.py
index 62fef12..092d98c 100644
--- a/tests/TestSuite_pvp_multi_paths_performance.py
+++ b/tests/TestSuite_pvp_multi_paths_performance.py
@@ -33,11 +33,15 @@ DPDK Test suite.
 Test PVP performance using virtio_user on 8 tx/rx path.
 """
-
+import json
+import rst
+import os
 import utils
 from test_case import TestCase
 from packet import Packet
 from pktgen import PacketGeneratorHelper
+from settings import UPDATE_EXPECTED, load_global_setting
+from copy import deepcopy
 
 
 class TestPVPMultiPathPerformance(TestCase):
 
@@ -63,38 +67,58 @@ class TestPVPMultiPathPerformance(TestCase):
         self.tester.send_expect('mkdir -p %s' % self.out_path, '# ')
         # create an instance to set stream field setting
         self.pktgen_helper = PacketGeneratorHelper()
+        self.vhost_user = self.dut.new_session(suite="user")
+        self.vhost = self.dut.new_session(suite="vhost")
+        self.save_result_flag = True
+        self.json_obj = {}
 
     def set_up(self):
         """
         Run before each test case.
""" - self.vhost_user = self.dut.new_session(suite="user") - self.vhost = self.dut.new_session(suite="vhost") # Prepare the result table self.table_header = ['Frame'] - self.table_header.append("Mode") + self.table_header.append("Mode/RXD-TXD") self.table_header.append("Mpps") self.table_header.append("% linerate") self.result_table_create(self.table_header) + self.test_parameters = self.get_suite_cfg()['test_parameters'] + # test parameters include: frames size, descriptor numbers + self.test_parameters = self.get_suite_cfg()['test_parameters'] + + # traffic duraion in second + self.test_duration = self.get_suite_cfg()['test_duration'] + + # initilize throughput attribution + # {'$framesize':{"$nb_desc": 'throughput'} + self.throughput = {} + + # Accepted tolerance in Mpps + self.gap = self.get_suite_cfg()['accepted_tolerance'] + self.test_result = {} + self.nb_desc = self.test_parameters[64][0] + def send_and_verify(self, case_info): """ Send packet with packet generator and verify """ for frame_size in self.frame_sizes: tgen_input = [] - for port in range(self.number_of_ports): - rx_port = self.tester.get_local_port( - self.dut_ports[port % self.number_of_ports]) - tx_port = self.tester.get_local_port( - self.dut_ports[(port) % self.number_of_ports]) - destination_mac = self.dut.get_mac_address( - self.dut_ports[(port) % self.number_of_ports]) - - pkt = Packet(pkt_type='UDP', pkt_len=frame_size) - pkt.config_layer('ether', {'dst': '%s' % destination_mac}) - pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port)) - tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port))) + self.throughput[frame_size] = dict() + self.logger.info("Test running at parameters: " + + "framesize: {}, rxd/txd: {}".format(frame_size, self.nb_desc)) + rx_port = self.tester.get_local_port( + self.dut_ports[0]) + tx_port = self.tester.get_local_port( + self.dut_ports[0]) + destination_mac = self.dut.get_mac_address( + self.dut_ports[0]) + + pkt = Packet(pkt_type='UDP', pkt_len=frame_size) + pkt.config_layer('ether', {'dst': '%s' % destination_mac}) + pkt.save_pcapfile(self.tester, "%s/multi_path.pcap" % (self.out_path)) + tgen_input.append((tx_port, rx_port, "%s/multi_path.pcap" % (self.out_path))) self.tester.pktgen.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen) @@ -102,15 +126,16 @@ class TestPVPMultiPathPerformance(TestCase): traffic_opt = {'delay': 5} _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt) Mpps = pps / 1000000.0 - self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size)) + self.verify(Mpps > 0.0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size)) - throughput = Mpps * 100 / \ - float(self.wirespeed(self.nic, frame_size, self.number_of_ports)) + self.throughput[frame_size][self.nb_desc] = Mpps + linerate = Mpps * 100 / \ + float(self.wirespeed(self.nic, frame_size, self.number_of_ports)) results_row = [frame_size] results_row.append(case_info) results_row.append(Mpps) - results_row.append(throughput) + results_row.append(linerate) self.result_table_add(results_row) @property @@ -129,7 +154,8 @@ class TestPVPMultiPathPerformance(TestCase): eal_param = self.dut.create_eal_parameters(cores=self.core_list_host, prefix='vhost', ports=[self.dut.ports_info[self.dut_ports[0]]['pci']], vdevs=['net_vhost0,iface=vhost-net,queues=1,client=0']) - command_line_client = 
"./%s/app/testpmd " % self.target + eal_param + " -- -i --nb-cores=1 --txd=1024 --rxd=1024" + command_line_client = "./%s/app/testpmd " % self.target + eal_param + \ + " -- -i --nb-cores=1 --txd=%d --rxd=%d" % (self.nb_desc, self.nb_desc) self.vhost.send_expect(command_line_client, "testpmd> ", 120) self.vhost.send_expect("set fwd mac", "testpmd> ", 120) self.vhost.send_expect("start", "testpmd> ", 120) @@ -144,13 +170,109 @@ class TestPVPMultiPathPerformance(TestCase): args["version"]]) if self.check_2M_env: eal_param += " --single-file-segments" - command_line_user = "./%s/app/testpmd " % self.target + eal_param + " -- -i %s --rss-ip --nb-cores=1 --txd=1024 --rxd=1024" % \ - args["path"] + command_line_user = "./%s/app/testpmd " % self.target + eal_param + \ + " -- -i %s --rss-ip --nb-cores=1 --txd=%d --rxd=%d" % \ + (args["path"], self.nb_desc, self.nb_desc) self.vhost_user = self.dut.new_session(suite="user") self.vhost_user.send_expect(command_line_user, "testpmd> ", 120) self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120) self.vhost_user.send_expect("start", "testpmd> ", 120) + def handle_expected(self): + """ + Update expected numbers to configurate file: conf/$suite_name.cfg + """ + if load_global_setting(UPDATE_EXPECTED) == "yes": + for frame_size in self.test_parameters.keys(): + for nb_desc in self.test_parameters[frame_size]: + self.expected_throughput[frame_size][nb_desc] = round(self.throughput[frame_size][nb_desc], 3) + + def handle_results(self): + """ + results handled process: + 1, save to self.test_results + 2, create test results table + 3, save to json file for Open Lab + """ + header = self.table_header + header.append("Expected Throughput") + header.append("Throughput Difference") + for frame_size in self.test_parameters.keys(): + wirespeed = self.wirespeed(self.nic, frame_size, self.number_of_ports) + ret_datas = {} + for nb_desc in self.test_parameters[frame_size]: + ret_data = {} + ret_data[header[0]] = frame_size + ret_data[header[1]] = nb_desc + ret_data[header[2]] = "{:.3f} Mpps".format( + self.throughput[frame_size][nb_desc]) + ret_data[header[3]] = "{:.3f}%".format( + self.throughput[frame_size][nb_desc] * 100 / wirespeed) + ret_data[header[4]] = "{:.3f} Mpps".format( + self.expected_throughput[frame_size][nb_desc]) + ret_data[header[5]] = "{:.3f} Mpps".format( + self.throughput[frame_size][nb_desc] - + self.expected_throughput[frame_size][nb_desc]) + ret_datas[nb_desc] = deepcopy(ret_data) + self.test_result[frame_size] = deepcopy(ret_datas) + # Create test results table + self.result_table_create(header) + for frame_size in self.test_parameters.keys(): + for nb_desc in self.test_parameters[frame_size]: + table_row = list() + for i in range(len(header)): + table_row.append( + self.test_result[frame_size][nb_desc][header[i]]) + self.result_table_add(table_row) + # present test results to screen + self.result_table_print() + # save test results as a file + if self.save_result_flag: + self.save_result(self.test_result) + + def save_result(self, data): + ''' + Saves the test results as a separated file named with + self.nic+_perf_virtio_user_pvp.json in output folder + if self.save_result_flag is True + ''' + case_name = self.running_case + self.json_obj[case_name] = list() + status_result = [] + for frame_size in self.test_parameters.keys(): + for nb_desc in self.test_parameters[frame_size]: + row_in = self.test_result[frame_size][nb_desc] + row_dict0 = dict() + row_dict0['performance'] = list() + row_dict0['parameters'] = list() + 
+                row_dict0['parameters'] = list()
+                result_throughput = float(row_in['Mpps'].split()[0])
+                expected_throughput = float(row_in['Expected Throughput'].split()[0])
+                # delta value, and accepted tolerance converted from percent to Mpps
+                delta = result_throughput - expected_throughput
+                gap = expected_throughput * -self.gap * 0.01
+                delta = float(delta)
+                gap = float(gap)
+                self.logger.info("Accepted tolerance (Mpps): %f" % gap)
+                self.logger.info("Throughput difference (Mpps): %f" % delta)
+                if result_throughput > expected_throughput + gap:
+                    row_dict0['status'] = 'PASS'
+                else:
+                    row_dict0['status'] = 'FAIL'
+                row_dict1 = dict(name="Throughput", value=result_throughput, unit="Mpps", delta=delta)
+                row_dict2 = dict(name="Txd/Rxd", value=row_in["Mode/RXD-TXD"], unit="descriptor")
+                row_dict3 = dict(name="frame_size", value=row_in["Frame"], unit="bytes")
+                row_dict0['performance'].append(row_dict1)
+                row_dict0['parameters'].append(row_dict2)
+                row_dict0['parameters'].append(row_dict3)
+                self.json_obj[case_name].append(row_dict0)
+                status_result.append(row_dict0['status'])
+        with open(os.path.join(rst.path2Result,
+                               '{0:s}_{1}.json'.format(
+                                   self.nic, self.suite_name)), 'w') as fp:
+            json.dump(self.json_obj, fp)
+        self.verify("FAIL" not in status_result, "Exceeded Gap")
+
     def close_all_testpmd(self):
         """
         close all testpmd of vhost and virtio
@@ -169,118 +291,163 @@ class TestPVPMultiPathPerformance(TestCase):
         """
         performance for PVP virtio 1.1 Mergeable Path.
         """
+        self.test_target = self.running_case
+        self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
         virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1",
                           "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
         self.start_vhost_testpmd()
         self.start_virtio_testpmd(virtio_pmd_arg)
         self.send_and_verify("virtio_1.1_mergeable on")
         self.close_all_testpmd()
+        self.logger.info('results of all frame sizes')
         self.result_table_print()
+        self.handle_expected()
+        self.handle_results()
 
     def test_perf_pvp_virtio11_normal(self):
         """
         performance for PVP virtio1.1 Normal Path.
         """
+        self.test_target = self.running_case
+        self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
         virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0",
                           "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
         self.start_vhost_testpmd()
         self.start_virtio_testpmd(virtio_pmd_arg)
         self.send_and_verify("virtio_1.1_normal")
         self.close_all_testpmd()
+        self.logger.info('results of all frame sizes')
         self.result_table_print()
+        self.handle_expected()
+        self.handle_results()
 
     def test_perf_pvp_virtio11_inorder_mergeable(self):
         """
         performance for PVP virtio 1.1 inorder Mergeable Path.
         """
+        self.test_target = self.running_case
+        self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
         virtio_pmd_arg = {"version": "in_order=1,packed_vq=1,mrg_rxbuf=1",
                           "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
         self.start_vhost_testpmd()
         self.start_virtio_testpmd(virtio_pmd_arg)
         self.send_and_verify("virtio_1.1_inorder_mergeable on")
         self.close_all_testpmd()
+        self.logger.info('results of all frame sizes')
         self.result_table_print()
+        self.handle_expected()
+        self.handle_results()
 
     def test_perf_pvp_virtio11_inorder_normal(self):
         """
         performance for PVP virtio1.1 inorder Normal Path.
""" + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "in_order=1,packed_vq=1,mrg_rxbuf=0", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("virtio_1.1_inorder_normal") self.close_all_testpmd() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_pvp_inorder_mergeable(self): """ performance for PVP In_order mergeable Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("inoder mergeable on") self.close_all_testpmd() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_pvp_inorder_no_mergeable(self): """ performance for PVP In_order no_mergeable Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("inoder mergeable off") self.close_all_testpmd() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_pvp_mergeable(self): """ performance for PVP Mergeable Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("virito mergeable") self.close_all_testpmd() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_pvp_normal(self): """ performance for PVP Normal Path. """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0", "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("virito normal") self.close_all_testpmd() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def test_perf_pvp_vector_rx(self): """ performance for PVP Vector Path """ + self.test_target = self.running_case + self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target] virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0", "path": "--tx-offloads=0x0 "} self.start_vhost_testpmd() self.start_virtio_testpmd(virtio_pmd_arg) self.send_and_verify("virito vector rx") self.close_all_testpmd() + self.logger.info('result of all framesize result') self.result_table_print() + self.handle_expected() + self.handle_results() def tear_down(self): """ Run after each test case. 
""" - self.dut.send_expect("killall -s INT testpmd", "#") - self.close_all_session() + self.dut.kill_all() def tear_down_all(self): """ Run after each test suite. """ + self.close_all_session() -- 2.7.4