From mboxrd@z Thu Jan  1 00:00:00 1970
From: "Tu, Lijuan"
To: "Ma, LihongX", "dts@dpdk.org"
CC: "Ma, LihongX"
Subject: Re: [dts] [PATCH V2] tests/pvp_multi_paths: reduce running time by reducing start testpmd times
Date: Fri, 22 Nov 2019 05:33:01 +0000
Message-ID: <8CE3E05A3F976642AAB0F4675D0AD20E0BB676C2@SHSMSX101.ccr.corp.intel.com>
References: <1572811503-2122-1-git-send-email-lihongx.ma@intel.com>
In-Reply-To: <1572811503-2122-1-git-send-email-lihongx.ma@intel.com>
List-Id: test suite reviews and discussions

Applied, thanks

> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of lihong
> Sent: Monday, November 4, 2019 4:05 AM
> To: dts@dpdk.org
> Cc: Ma, LihongX
> Subject: [dts] [PATCH V2] tests/pvp_multi_paths: reduce running time by reducing start testpmd times
>
> Signed-off-by: lihong
> ---
>  tests/TestSuite_pvp_multi_paths_performance.py     | 118 +++++++++---------
>  ...vp_multi_paths_vhost_single_core_performance.py | 132 +++++++++-----------
>  ...p_multi_paths_virtio_single_core_performance.py | 118 +++++++++---------
>  3 files changed, 169 insertions(+), 199 deletions(-)
>
> diff --git a/tests/TestSuite_pvp_multi_paths_performance.py b/tests/TestSuite_pvp_multi_paths_performance.py
> index 65c785a..fe6bd11 100644
> --- a/tests/TestSuite_pvp_multi_paths_performance.py
> +++ b/tests/TestSuite_pvp_multi_paths_performance.py
> @@ -83,40 +83,41 @@ class TestPVPMultiPathPerformance(TestCase):
>          self.table_header.append("% linerate")
>          self.result_table_create(self.table_header)
>
> -    def send_and_verify(self, case_info, frame_size):
> +    def send_and_verify(self, case_info):
>          """
>          Send packet with packet generator and verify
>          """
> -        tgen_input = []
> -        for port in xrange(self.number_of_ports):
> -            rx_port = self.tester.get_local_port(
> -                self.dut_ports[port % self.number_of_ports])
> -            tx_port = self.tester.get_local_port(
> -                self.dut_ports[(port) % self.number_of_ports])
> -            destination_mac = self.dut.get_mac_address(
> -                self.dut_ports[(port) % self.number_of_ports])
> +        for frame_size in self.frame_sizes:
> +            tgen_input = []
> +            for port in xrange(self.number_of_ports):
> +                rx_port = self.tester.get_local_port(
> +                    self.dut_ports[port % self.number_of_ports])
> +                tx_port = self.tester.get_local_port(
> +                    self.dut_ports[(port) % self.number_of_ports])
> +                destination_mac = self.dut.get_mac_address(
> +                    self.dut_ports[(port) % self.number_of_ports])
>
> -            pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> -            pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> -            pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> -            tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port)))
> +                pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> +                pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> +                pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> +                tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port)))
>
> -        self.tester.pktgen.clear_streams()
> -        streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> -        # set traffic option
> -        traffic_opt = {'delay': 5}
> -        _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> -        Mpps = pps / 1000000.0
> -        self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
> +            self.tester.pktgen.clear_streams()
> +            streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> +            # set traffic option
> +            traffic_opt = {'delay': 5}
> +            _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> +            Mpps = pps / 1000000.0
> +            self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
>
> -        throughput = Mpps * 100 / \
> -                    float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
> +            throughput = Mpps * 100 / \
> +                        float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
>
> -        results_row = [frame_size]
> -        results_row.append(case_info)
> -        results_row.append(Mpps)
> -        results_row.append(throughput)
> -        self.result_table_add(results_row)
> +            results_row = [frame_size]
> +            results_row.append(case_info)
> +            results_row.append(Mpps)
> +            results_row.append(throughput)
> +            self.result_table_add(results_row)
>
>      def start_vhost_testpmd(self):
>          """
> @@ -172,11 +173,10 @@ class TestPVPMultiPathPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virtio_1.1_mergeable on", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virtio_1.1_mergeable on")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_pvp_virtio11_normal(self):
> @@ -185,11 +185,10 @@ class TestPVPMultiPathPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virtio_1.1_normal", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virtio_1.1_normal")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_pvp_inorder_mergeable(self):
> @@ -198,11 +197,10 @@ class TestPVPMultiPathPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("inoder mergeable on", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("inoder mergeable on")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_pvp_inorder_no_mergeable(self):
> @@ -211,11 +209,10 @@ class TestPVPMultiPathPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("inoder mergeable off", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("inoder mergeable off")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_pvp_mergeable(self):
> @@ -224,11 +221,10 @@ class TestPVPMultiPathPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virito mergeable", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virito mergeable")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_pvp_normal(self):
> @@ -237,11 +233,10 @@ class TestPVPMultiPathPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virito normal", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virito normal")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_pvp_vector_rx(self):
> @@ -250,11 +245,10 @@ class TestPVPMultiPathPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 "}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virito vector rx", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virito vector rx")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def tear_down(self):
> diff --git a/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py b/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py
> index 29d87c2..fbdb939 100644
> --- a/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py
> +++ b/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py
> @@ -79,40 +79,43 @@ class TestPVPMultiPathVhostPerformance(TestCase):
>          self.table_header.append("Mpps")
>          self.table_header.append("% linerate")
>          self.result_table_create(self.table_header)
> +        self.vhost_user = self.dut.new_session(suite="user")
> +        self.vhost = self.dut.new_session(suite="vhost")
>
> -    def send_and_verify(self, case_info, frame_size):
> +    def send_and_verify(self, case_info):
>          """
>          Send packet with packet generator and verify
>          """
> -        tgen_input = []
> -        for port in xrange(self.number_of_ports):
> -            rx_port = self.tester.get_local_port(
> -                self.dut_ports[port % self.number_of_ports])
> -            tx_port = self.tester.get_local_port(
> -                self.dut_ports[(port) % self.number_of_ports])
> -            destination_mac = self.dut.get_mac_address(
> -                self.dut_ports[(port) % self.number_of_ports])
> +        for frame_size in self.frame_sizes:
> +            tgen_input = []
> +            for port in xrange(self.number_of_ports):
> +                rx_port = self.tester.get_local_port(
> +                    self.dut_ports[port % self.number_of_ports])
> +                tx_port = self.tester.get_local_port(
> +                    self.dut_ports[(port) % self.number_of_ports])
> +                destination_mac = self.dut.get_mac_address(
> +                    self.dut_ports[(port) % self.number_of_ports])
>
> -            pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> -            pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> -            pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> -            tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port)))
> +                pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> +                pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> +                pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> +                tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port)))
>
> -        self.tester.pktgen.clear_streams()
> -        streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> -        traffic_opt = {'delay': 5}
> -        _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> -        Mpps = pps / 1000000.0
> -        self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
> +            self.tester.pktgen.clear_streams()
> +            streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> +            traffic_opt = {'delay': 5}
> +            _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> +            Mpps = pps / 1000000.0
> +            self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
>
> -        throughput = Mpps * 100 / \
> -                    float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
> +            throughput = Mpps * 100 / \
> +                        float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
>
> -        results_row = [frame_size]
> -        results_row.append(case_info)
> -        results_row.append(Mpps)
> -        results_row.append(throughput)
> -        self.result_table_add(results_row)
> +            results_row = [frame_size]
> +            results_row.append(case_info)
> +            results_row.append(Mpps)
> +            results_row.append(throughput)
> +            self.result_table_add(results_row)
>
>      def start_vhost_testpmd(self):
>          """
> @@ -166,13 +169,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> -        self.vhost_user = self.dut.new_session(suite="user")
> -        self.vhost = self.dut.new_session(suite="vhost")
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virtio_1.1_mergeable on", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virtio_1.1_mergeable on")
> +        self.close_all_testpmd()
>
>          self.close_all_session()
>          self.result_table_print()
> @@ -183,13 +183,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> -        self.vhost_user = self.dut.new_session(suite="user")
> -        self.vhost = self.dut.new_session(suite="vhost")
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virtio_1.1 normal", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virtio_1.1 normal")
> +        self.close_all_testpmd()
>          self.close_all_session()
>          self.result_table_print()
>
> @@ -199,13 +196,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> -        self.vhost_user = self.dut.new_session(suite="user")
> -        self.vhost = self.dut.new_session(suite="vhost")
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("inoder mergeable on", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("inoder mergeable on")
> +        self.close_all_testpmd()
>          self.close_all_session()
>          self.result_table_print()
>
> @@ -215,13 +209,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> -        self.vhost_user = self.dut.new_session(suite="user")
> -        self.vhost = self.dut.new_session(suite="vhost")
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("inoder mergeable off", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("inoder mergeable off")
> +        self.close_all_testpmd()
>          self.close_all_session()
>          self.result_table_print()
>
> @@ -231,13 +222,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> -        self.vhost_user = self.dut.new_session(suite="user")
> -        self.vhost = self.dut.new_session(suite="vhost")
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("mergeable on", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("mergeable on")
> +        self.close_all_testpmd()
>          self.close_all_session()
>          self.result_table_print()
>
> @@ -247,13 +235,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> -        self.vhost_user = self.dut.new_session(suite="user")
> -        self.vhost = self.dut.new_session(suite="vhost")
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("normal", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("normal")
> +        self.close_all_testpmd()
>          self.close_all_session()
>          self.result_table_print()
>
> @@ -263,13 +248,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0"}
> -        self.vhost_user = self.dut.new_session(suite="user")
> -        self.vhost = self.dut.new_session(suite="vhost")
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("vector rx", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("vector rx")
> +        self.close_all_testpmd()
>          self.close_all_session()
>          self.result_table_print()
>
> diff --git a/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py b/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py
> index e8807a7..8850b10 100644
> --- a/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py
> +++ b/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py
> @@ -83,41 +83,42 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
>          self.table_header.append("% linerate")
>          self.result_table_create(self.table_header)
>
> -    def send_and_verify(self, case_info, frame_size):
> +    def send_and_verify(self, case_info):
>          """
>          Send packet with packet generator and verify
>          """
> -        tgen_input = []
> -        for port in xrange(self.number_of_ports):
> -            rx_port = self.tester.get_local_port(
> -                self.dut_ports[port % self.number_of_ports])
> -            tx_port = self.tester.get_local_port(
> -                self.dut_ports[(port) % self.number_of_ports])
> -            destination_mac = self.dut.get_mac_address(
> -                self.dut_ports[(port) % self.number_of_ports])
> +        for frame_size in self.frame_sizes:
> +            tgen_input = []
> +            for port in xrange(self.number_of_ports):
> +                rx_port = self.tester.get_local_port(
> +                    self.dut_ports[port % self.number_of_ports])
> +                tx_port = self.tester.get_local_port(
> +                    self.dut_ports[(port) % self.number_of_ports])
> +                destination_mac = self.dut.get_mac_address(
> +                    self.dut_ports[(port) % self.number_of_ports])
>
> -            pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> -            pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> -            pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> +                pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> +                pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> +                pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
>
> -            tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port)))
> +                tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port)))
>
> -        self.tester.pktgen.clear_streams()
> -        streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> -        # set traffic option
> -        traffic_opt = {'delay': 5}
> -        _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> -        Mpps = pps / 1000000.0
> -        self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
> +            self.tester.pktgen.clear_streams()
> +            streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> +            # set traffic option
> +            traffic_opt = {'delay': 5}
> +            _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> +            Mpps = pps / 1000000.0
> +            self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
>
> -        throughput = Mpps * 100 / \
> -                    float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
> +            throughput = Mpps * 100 / \
> +                        float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
>
> -        results_row = [frame_size]
> -        results_row.append(case_info)
> -        results_row.append(Mpps)
> -        results_row.append(throughput)
> -        self.result_table_add(results_row)
> +            results_row = [frame_size]
> +            results_row.append(case_info)
> +            results_row.append(Mpps)
> +            results_row.append(throughput)
> +            self.result_table_add(results_row)
>
>      def start_vhost_testpmd(self):
>          """
> @@ -172,11 +173,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=1,in_order=0,mrg_rxbuf=1",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virtio_1.1_mergeable on", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virtio_1.1_mergeable on")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_virtio_single_core_virtio11_normal(self):
> @@ -185,11 +185,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=1,in_order=0,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virtio_1.1_normal", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virtio_1.1_normal")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_virtio_single_core_inorder_mergeable(self):
> @@ -198,11 +197,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("inoder mergeable on", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("inoder mergeable on")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_virtio_single_core_inorder_no_mergeable(self):
> @@ -211,11 +209,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("inoder mergeable off", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("inoder mergeable off")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_virtio_single_core_mergeable(self):
> @@ -224,11 +221,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virito mergeable", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virito mergeable")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_virtio_single_core_normal(self):
> @@ -237,11 +233,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virito normal", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virito normal")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def test_perf_virtio_single_core_vector_rx(self):
> @@ -250,11 +245,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
>          """
>          virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
>                          "path": "--tx-offloads=0x0"}
> -        for frame_size in self.frame_sizes:
> -            self.start_vhost_testpmd()
> -            self.start_virtio_testpmd(virtio_pmd_arg)
> -            self.send_and_verify("virtio vector rx", frame_size)
> -            self.close_all_testpmd()
> +        self.start_vhost_testpmd()
> +        self.start_virtio_testpmd(virtio_pmd_arg)
> +        self.send_and_verify("virtio vector rx")
> +        self.close_all_testpmd()
>          self.result_table_print()
>
>      def tear_down(self):
> --
> 2.7.4
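
For readers skimming the archive, the applied change follows one pattern across all three suites: the per-frame-size loop is hoisted into send_and_verify(), so vhost/virtio testpmd is launched once per test case instead of once per frame size. Below is a minimal, self-contained sketch of that pattern in plain Python; the names (FRAME_SIZES, start_testpmd, measure) are illustrative stand-ins, not the DTS API.

# Illustrative sketch only: start_testpmd() stands in for the suite's
# start_vhost_testpmd()/start_virtio_testpmd()/close_all_testpmd() cycle.
FRAME_SIZES = [64, 128, 256, 512, 1024, 1518]
starts = {"before": 0, "after": 0}

def start_testpmd(label):
    starts[label] += 1      # the expensive step we want to run only once

def measure(frame_size):
    return 1.0              # placeholder for the pktgen throughput call

# Before the patch: testpmd is restarted for every frame size (6 starts per case).
for frame_size in FRAME_SIZES:
    start_testpmd("before")
    measure(frame_size)

# After the patch: testpmd starts once, and send_and_verify() loops over frame sizes.
start_testpmd("after")
for frame_size in FRAME_SIZES:
    measure(frame_size)

print(starts)               # {'before': 6, 'after': 1}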