From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 60508A0471 for ; Tue, 13 Aug 2019 07:53:13 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 224DF1B952; Tue, 13 Aug 2019 07:53:13 +0200 (CEST) Received: from mga03.intel.com (mga03.intel.com [134.134.136.65]) by dpdk.org (Postfix) with ESMTP id 7C6E81B203 for ; Tue, 13 Aug 2019 07:53:11 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga103.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 12 Aug 2019 22:53:10 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,380,1559545200"; d="scan'208";a="376192288" Received: from dpdk-lihong-ub1604.sh.intel.com ([10.67.119.68]) by fmsmga006.fm.intel.com with ESMTP; 12 Aug 2019 22:53:09 -0700 From: lihong To: dts@dpdk.org Cc: lihong Date: Tue, 13 Aug 2019 06:30:01 +0800 Message-Id: <1565649001-30160-1-git-send-email-lihongx.ma@intel.com> X-Mailer: git-send-email 2.7.4 Subject: [dts] [PATCH V1] tests/vhost_multi_queue_qemu: optimize code X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org Sender: "dts" 1. update code to support the TRex packet generator 2. check link status after port restart 3. 
modify the class Name Signed-off-by: lihong --- tests/TestSuite_vhost_multi_queue_qemu.py | 50 ++++++++++++++++++------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/tests/TestSuite_vhost_multi_queue_qemu.py b/tests/TestSuite_vhost_multi_queue_qemu.py index 1232cf3..9d839d5 100644 --- a/tests/TestSuite_vhost_multi_queue_qemu.py +++ b/tests/TestSuite_vhost_multi_queue_qemu.py @@ -41,9 +41,11 @@ from test_case import TestCase from settings import HEADER_SIZE from virt_common import VM from packet import Packet, send_packets, save_packets +from pmd_output import PmdOutput +from pktgen import PacketGeneratorHelper -class TestVhostUserOneCopyOneVm(TestCase): +class TestVhostMultiQueueQemu(TestCase): def set_up_all(self): # Get and verify the ports @@ -69,6 +71,14 @@ class TestVhostUserOneCopyOneVm(TestCase): self.number_of_ports = 1 self.header_row = ["FrameSize(B)", "Throughput(Mpps)", "LineRate(%)", "Cycle"] self.memory_channel = self.dut.get_memory_channels() + self.pmd_out = PmdOutput(self.dut) + + self.out_path = '/tmp' + out = self.tester.send_expect('ls -d %s' % self.out_path, '# ') + if 'No such file or directory' in out: + self.tester.send_expect('mkdir -p %s' % self.out_path, '# ') + # create an instance to set stream field setting + self.pktgen_helper = PacketGeneratorHelper() def set_up(self): """ @@ -76,7 +86,7 @@ class TestVhostUserOneCopyOneVm(TestCase): """ self.dut.send_expect("rm -rf ./vhost.out", "#") self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("killall -s INT vhost-switch", "#") + self.dut.send_expect("killall -s INT testpmd", "#") self.frame_sizes = [64, 128, 256, 512, 1024, 1500] self.vm_testpmd_vector = self.target + "/app/testpmd -c %s -n 3" + \ @@ -135,33 +145,25 @@ class TestVhostUserOneCopyOneVm(TestCase): for frame_size in self.frame_sizes: info = "Running test %s, and %d frame size." 
% (self.running_case, frame_size) self.logger.info(info) - payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp'] + payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] tgenInput = [] pkt1 = Packet() - pkt1.assign_layers(['ether', 'ipv4', 'udp', 'raw']) + pkt1.assign_layers(['ether', 'ipv4', 'raw']) pkt1.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.1'}), - ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})]) - pkt2 = Packet() - pkt2.assign_layers(['ether', 'ipv4', 'udp', 'raw']) - pkt2.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.20'}), - ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})]) - pkt3 = Packet() - pkt3.assign_layers(['ether', 'ipv4', 'udp', 'raw']) - pkt3.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.7'}), - ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})]) - pkt4 = Packet() - pkt4.assign_layers(['ether', 'ipv4', 'udp', 'raw']) - pkt4.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.8'}), - ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})]) + ('raw', {'payload': ['01'] * int('%d' % payload_size)})]) - pkt = [pkt1, pkt2, pkt3, pkt4] - save_packets(pkt, "/root/multiqueue_2.pcap") + pkt = [pkt1] + save_packets(pkt, "%s/multiqueue.pcap" % self.out_path) port = self.tester.get_local_port(self.pf) - tgenInput.append((port, port, "multiqueue_2.pcap")) + tgenInput.append((port, port, "%s/multiqueue.pcap" % self.out_path)) - _, pps = self.tester.traffic_generator_throughput(tgenInput, delay=30) + fields_config = {'ip': {'dst': {'action': 'random'}, }, } + self.tester.pktgen.clear_streams() + streams = self.pktgen_helper.prepare_stream_from_tginput(tgenInput, 100, fields_config, self.tester.pktgen) 
+ traffic_opt = {'delay': 5} + _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt) Mpps = pps / 1000000.0 pct = Mpps * 100 / float(self.wirespeed(self.nic, frame_size, self.number_of_ports)) @@ -263,6 +265,9 @@ class TestVhostUserOneCopyOneVm(TestCase): self.dut.send_expect("stop", "testpmd> ", 120) self.dut.send_expect("start", "testpmd> ", 120) + res = self.pmd_out.wait_link_status_up('all', timeout = 15) + self.verify(res is True, 'There has port link is down') + self.dut.send_expect("clear port stats all", "testpmd> ", 120) self.send_and_verify("vhost queue = virtio queue") @@ -301,6 +306,8 @@ class TestVhostUserOneCopyOneVm(TestCase): self.dut.send_expect("port start all", "testpmd>", 20) self.dut.send_expect("start", "testpmd>") self.dut.send_expect("clear port stats all", "testpmd>") + res = self.pmd_out.wait_link_status_up('all', timeout = 15) + self.verify(res is True, 'There has port link is down') self.send_and_verify("vhost queue = virtio queue") @@ -313,6 +320,7 @@ class TestVhostUserOneCopyOneVm(TestCase): Clear vhost-switch and qemu to avoid blocking the following TCs """ self.vm.stop() + self.dut.kill_all() time.sleep(2) def tear_down_all(self): -- 2.7.4