From: "Tu, Lijuan" <lijuan.tu@intel.com>
To: "Ma, LihongX" <lihongx.ma@intel.com>, "dts@dpdk.org" <dts@dpdk.org>
Cc: "Ma, LihongX" <lihongx.ma@intel.com>
Subject: Re: [dts] [PATCH V2] tests/pvp_multi_paths: reduce running time by reducing start testpmd times
Date: Fri, 22 Nov 2019 05:33:01 +0000 [thread overview]
Message-ID: <8CE3E05A3F976642AAB0F4675D0AD20E0BB676C2@SHSMSX101.ccr.corp.intel.com> (raw)
In-Reply-To: <1572811503-2122-1-git-send-email-lihongx.ma@intel.com>
Applied, thanks
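For anyone skimming the diff below: the speed-up comes from hoisting the per-frame-size loop out of each test case and into send_and_verify(), so the vhost and virtio testpmd instances start once per path instead of once per frame size. A rough sketch of the pattern (simplified pseudocode, not DTS code; "suite" stands in for the TestCase instance and "pmd_arg" for virtio_pmd_arg):

    # Before: one testpmd start/stop cycle per frame size.
    def run_case_before(suite, case_info, pmd_arg, frame_sizes):
        for frame_size in frame_sizes:
            suite.start_vhost_testpmd()
            suite.start_virtio_testpmd(pmd_arg)
            suite.send_and_verify(case_info, frame_size)  # measures one size
            suite.close_all_testpmd()

    # After: start once; send_and_verify() iterates suite.frame_sizes itself.
    def run_case_after(suite, case_info, pmd_arg):
        suite.start_vhost_testpmd()
        suite.start_virtio_testpmd(pmd_arg)
        suite.send_and_verify(case_info)  # loops over all frame sizes
        suite.close_all_testpmd()

With N frame sizes and the seven paths each suite covers, that cuts testpmd startups from 7*N to 7 per suite.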
> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of lihong
> Sent: Monday, November 4, 2019 4:05 AM
> To: dts@dpdk.org
> Cc: Ma, LihongX <lihongx.ma@intel.com>
> Subject: [dts] [PATCH V2] tests/pvp_multi_paths: reduce running time by
> reducing start testpmd times
>
> Signed-off-by: lihong <lihongx.ma@intel.com>
> ---
> tests/TestSuite_pvp_multi_paths_performance.py     | 118 +++++++++---------
> ...vp_multi_paths_vhost_single_core_performance.py | 132 +++++++++------------
> ...p_multi_paths_virtio_single_core_performance.py | 118 +++++++++---------
> 3 files changed, 169 insertions(+), 199 deletions(-)
>
> diff --git a/tests/TestSuite_pvp_multi_paths_performance.py
> b/tests/TestSuite_pvp_multi_paths_performance.py
> index 65c785a..fe6bd11 100644
> --- a/tests/TestSuite_pvp_multi_paths_performance.py
> +++ b/tests/TestSuite_pvp_multi_paths_performance.py
> @@ -83,40 +83,41 @@ class TestPVPMultiPathPerformance(TestCase):
> self.table_header.append("% linerate")
> self.result_table_create(self.table_header)
>
> - def send_and_verify(self, case_info, frame_size):
> + def send_and_verify(self, case_info):
> """
> Send packet with packet generator and verify
> """
> - tgen_input = []
> - for port in xrange(self.number_of_ports):
> - rx_port = self.tester.get_local_port(
> - self.dut_ports[port % self.number_of_ports])
> - tx_port = self.tester.get_local_port(
> - self.dut_ports[(port) % self.number_of_ports])
> - destination_mac = self.dut.get_mac_address(
> - self.dut_ports[(port) % self.number_of_ports])
> + for frame_size in self.frame_sizes:
> + tgen_input = []
> + for port in xrange(self.number_of_ports):
> + rx_port = self.tester.get_local_port(
> + self.dut_ports[port % self.number_of_ports])
> + tx_port = self.tester.get_local_port(
> + self.dut_ports[(port) % self.number_of_ports])
> + destination_mac = self.dut.get_mac_address(
> + self.dut_ports[(port) % self.number_of_ports])
>
> - pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> - pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> - pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> - tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port)))
> + pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> + pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> + pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> + tgen_input.append((tx_port, rx_port,
> + "%s/multi_path_%d.pcap" % (self.out_path, port)))
>
> - self.tester.pktgen.clear_streams()
> - streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> - # set traffic option
> - traffic_opt = {'delay': 5}
> - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> - Mpps = pps / 1000000.0
> - self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
> + self.tester.pktgen.clear_streams()
> + streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> + # set traffic option
> + traffic_opt = {'delay': 5}
> + _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> + Mpps = pps / 1000000.0
> + self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
>
> - throughput = Mpps * 100 / \
> - float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
> + throughput = Mpps * 100 / \
> + float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
>
> - results_row = [frame_size]
> - results_row.append(case_info)
> - results_row.append(Mpps)
> - results_row.append(throughput)
> - self.result_table_add(results_row)
> + results_row = [frame_size]
> + results_row.append(case_info)
> + results_row.append(Mpps)
> + results_row.append(throughput)
> + self.result_table_add(results_row)
>
> def start_vhost_testpmd(self):
> """
> @@ -172,11 +173,10 @@ class TestPVPMultiPathPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio_1.1_mergeable on", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio_1.1_mergeable on")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_pvp_virtio11_normal(self):
> @@ -185,11 +185,10 @@ class TestPVPMultiPathPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio_1.1_normal", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio_1.1_normal")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_pvp_inorder_mergeable(self):
> @@ -198,11 +197,10 @@ class TestPVPMultiPathPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("inorder mergeable on", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("inorder mergeable on")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_pvp_inorder_no_mergeable(self):
> @@ -211,11 +209,10 @@ class TestPVPMultiPathPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("inorder mergeable off", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("inorder mergeable off")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_pvp_mergeable(self):
> @@ -224,11 +221,10 @@ class TestPVPMultiPathPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio mergeable", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio mergeable")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_pvp_normal(self):
> @@ -237,11 +233,10 @@ class TestPVPMultiPathPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio normal", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio normal")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_pvp_vector_rx(self):
> @@ -250,11 +245,10 @@ class TestPVPMultiPathPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 "}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio vector rx", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio vector rx")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def tear_down(self):
> diff --git a/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py b/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py
> index 29d87c2..fbdb939 100644
> --- a/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py
> +++ b/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py
> @@ -79,40 +79,43 @@ class TestPVPMultiPathVhostPerformance(TestCase):
> self.table_header.append("Mpps")
> self.table_header.append("% linerate")
> self.result_table_create(self.table_header)
> + self.vhost_user = self.dut.new_session(suite="user")
> + self.vhost = self.dut.new_session(suite="vhost")
>
> - def send_and_verify(self, case_info, frame_size):
> + def send_and_verify(self, case_info):
> """
> Send packet with packet generator and verify
> """
> - tgen_input = []
> - for port in xrange(self.number_of_ports):
> - rx_port = self.tester.get_local_port(
> - self.dut_ports[port % self.number_of_ports])
> - tx_port = self.tester.get_local_port(
> - self.dut_ports[(port) % self.number_of_ports])
> - destination_mac = self.dut.get_mac_address(
> - self.dut_ports[(port) % self.number_of_ports])
> + for frame_size in self.frame_sizes:
> + tgen_input = []
> + for port in xrange(self.number_of_ports):
> + rx_port = self.tester.get_local_port(
> + self.dut_ports[port % self.number_of_ports])
> + tx_port = self.tester.get_local_port(
> + self.dut_ports[(port) % self.number_of_ports])
> + destination_mac = self.dut.get_mac_address(
> + self.dut_ports[(port) % self.number_of_ports])
>
> - pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> - pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> - pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> - tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port)))
> + pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> + pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> + pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> + tgen_input.append((tx_port, rx_port,
> + "%s/multi_path_%d.pcap" % (self.out_path, port)))
>
> - self.tester.pktgen.clear_streams()
> - streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> - traffic_opt = {'delay': 5}
> - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> - Mpps = pps / 1000000.0
> - self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
> + self.tester.pktgen.clear_streams()
> + streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> + traffic_opt = {'delay': 5}
> + _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> + Mpps = pps / 1000000.0
> + self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
>
> - throughput = Mpps * 100 / \
> - float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
> + throughput = Mpps * 100 / \
> + float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
>
> - results_row = [frame_size]
> - results_row.append(case_info)
> - results_row.append(Mpps)
> - results_row.append(throughput)
> - self.result_table_add(results_row)
> + results_row = [frame_size]
> + results_row.append(case_info)
> + results_row.append(Mpps)
> + results_row.append(throughput)
> + self.result_table_add(results_row)
>
> def start_vhost_testpmd(self):
> """
> @@ -166,13 +169,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> - self.vhost_user = self.dut.new_session(suite="user")
> - self.vhost = self.dut.new_session(suite="vhost")
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio_1.1_mergeable on", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio_1.1_mergeable on")
> + self.close_all_testpmd()
>
> self.close_all_session()
> self.result_table_print()
> @@ -183,13 +183,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> - self.vhost_user = self.dut.new_session(suite="user")
> - self.vhost = self.dut.new_session(suite="vhost")
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio_1.1 normal", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio_1.1 normal")
> + self.close_all_testpmd()
> self.close_all_session()
> self.result_table_print()
>
> @@ -199,13 +196,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> - self.vhost_user = self.dut.new_session(suite="user")
> - self.vhost = self.dut.new_session(suite="vhost")
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("inorder mergeable on", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("inorder mergeable on")
> + self.close_all_testpmd()
> self.close_all_session()
> self.result_table_print()
>
> @@ -215,13 +209,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> - self.vhost_user = self.dut.new_session(suite="user")
> - self.vhost = self.dut.new_session(suite="vhost")
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("inorder mergeable off", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("inorder mergeable off")
> + self.close_all_testpmd()
> self.close_all_session()
> self.result_table_print()
>
> @@ -231,13 +222,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> - self.vhost_user = self.dut.new_session(suite="user")
> - self.vhost = self.dut.new_session(suite="vhost")
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("mergeable on", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("mergeable on")
> + self.close_all_testpmd()
> self.close_all_session()
> self.result_table_print()
>
> @@ -247,13 +235,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
> - self.vhost_user = self.dut.new_session(suite="user")
> - self.vhost = self.dut.new_session(suite="vhost")
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("normal", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("normal")
> + self.close_all_testpmd()
> self.close_all_session()
> self.result_table_print()
>
> @@ -263,13 +248,10 @@ class TestPVPMultiPathVhostPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0"}
> - self.vhost_user = self.dut.new_session(suite="user")
> - self.vhost = self.dut.new_session(suite="vhost")
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("vector rx", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("vector rx")
> + self.close_all_testpmd()
> self.close_all_session()
> self.result_table_print()
>
> diff --git a/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py b/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py
> index e8807a7..8850b10 100644
> --- a/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py
> +++ b/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py
> @@ -83,41 +83,42 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
> self.table_header.append("% linerate")
> self.result_table_create(self.table_header)
>
> - def send_and_verify(self, case_info, frame_size):
> + def send_and_verify(self, case_info):
> """
> Send packet with packet generator and verify
> """
> - tgen_input = []
> - for port in xrange(self.number_of_ports):
> - rx_port = self.tester.get_local_port(
> - self.dut_ports[port % self.number_of_ports])
> - tx_port = self.tester.get_local_port(
> - self.dut_ports[(port) % self.number_of_ports])
> - destination_mac = self.dut.get_mac_address(
> - self.dut_ports[(port) % self.number_of_ports])
> + for frame_size in self.frame_sizes:
> + tgen_input = []
> + for port in xrange(self.number_of_ports):
> + rx_port = self.tester.get_local_port(
> + self.dut_ports[port % self.number_of_ports])
> + tx_port = self.tester.get_local_port(
> + self.dut_ports[(port) % self.number_of_ports])
> + destination_mac = self.dut.get_mac_address(
> + self.dut_ports[(port) % self.number_of_ports])
>
> - pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> - pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> - pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
> + pkt = Packet(pkt_type='UDP', pkt_len=frame_size)
> + pkt.config_layer('ether', {'dst': '%s' % destination_mac})
> + pkt.save_pcapfile(self.tester, "%s/multi_path_%d.pcap" % (self.out_path, port))
>
> - tgen_input.append((tx_port, rx_port, "%s/multi_path_%d.pcap" % (self.out_path, port)))
> + tgen_input.append((tx_port, rx_port,
> + "%s/multi_path_%d.pcap" % (self.out_path, port)))
>
> - self.tester.pktgen.clear_streams()
> - streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> - # set traffic option
> - traffic_opt = {'delay': 5}
> - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> - Mpps = pps / 1000000.0
> - self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
> + self.tester.pktgen.clear_streams()
> + streams = self.pktgen_helper.prepare_stream_from_tginput(tgen_input, 100, None, self.tester.pktgen)
> + # set traffic option
> + traffic_opt = {'delay': 5}
> + _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)
> + Mpps = pps / 1000000.0
> + self.verify(Mpps > 0, "%s can not receive packets of frame size %d" % (self.running_case, frame_size))
>
> - throughput = Mpps * 100 / \
> - float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
> + throughput = Mpps * 100 / \
> + float(self.wirespeed(self.nic, frame_size, self.number_of_ports))
>
> - results_row = [frame_size]
> - results_row.append(case_info)
> - results_row.append(Mpps)
> - results_row.append(throughput)
> - self.result_table_add(results_row)
> + results_row = [frame_size]
> + results_row.append(case_info)
> + results_row.append(Mpps)
> + results_row.append(throughput)
> + self.result_table_add(results_row)
>
> def start_vhost_testpmd(self):
> """
> @@ -172,11 +173,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=1,in_order=0,mrg_rxbuf=1",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio_1.1_mergeable on", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio_1.1_mergeable on")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_virtio_single_core_virtio11_normal(self):
> @@ -185,11 +185,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=1,in_order=0,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio_1.1_normal", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio_1.1_normal")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_virtio_single_core_inorder_mergeable(self):
> @@ -198,11 +197,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("inorder mergeable on", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("inorder mergeable on")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_virtio_single_core_inorder_no_mergeable(self):
> @@ -211,11 +209,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("inorder mergeable off", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("inorder mergeable off")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_virtio_single_core_mergeable(self):
> @@ -224,11 +221,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio mergeable", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio mergeable")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_virtio_single_core_normal(self):
> @@ -237,11 +233,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio normal", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio normal")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def test_perf_virtio_single_core_vector_rx(self):
> @@ -250,11 +245,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase):
> """
> virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
> "path": "--tx-offloads=0x0"}
> - for frame_size in self.frame_sizes:
> - self.start_vhost_testpmd()
> - self.start_virtio_testpmd(virtio_pmd_arg)
> - self.send_and_verify("virtio vector rx", frame_size)
> - self.close_all_testpmd()
> + self.start_vhost_testpmd()
> + self.start_virtio_testpmd(virtio_pmd_arg)
> + self.send_and_verify("virtio vector rx")
> + self.close_all_testpmd()
> self.result_table_print()
>
> def tear_down(self):
> --
> 2.7.4
Thread overview: 3+ messages
2019-11-03 20:05 lihong
2019-11-13 2:09 ` Wang, Yinan
2019-11-22 5:33 ` Tu, Lijuan [this message]