test suite reviews and discussions
* [dts] [PATCH V1] Add test suite about vhost multi queue qemu
@ 2018-03-15  8:38 lihong
  2018-03-19  8:34 ` Liu, Yong
  0 siblings, 1 reply; 8+ messages in thread
From: lihong @ 2018-03-15  8:38 UTC (permalink / raw)
  To: dts; +Cc: lihong

Signed-off-by: lihong <lihongx.ma@intel.com>
---
 tests/TestSuite_vhost_multi_queue_qemu.py | 320 ++++++++++++++++++++++++++++++
 1 file changed, 320 insertions(+)
 create mode 100644 tests/TestSuite_vhost_multi_queue_qemu.py

diff --git a/tests/TestSuite_vhost_multi_queue_qemu.py b/tests/TestSuite_vhost_multi_queue_qemu.py
new file mode 100644
index 0000000..0654842
--- /dev/null
+++ b/tests/TestSuite_vhost_multi_queue_qemu.py
@@ -0,0 +1,320 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2018 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Intel Corporation nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+DPDK Test suite.
+
+Vhost PVP performance using Qemu test suite.
+"""
+import re
+import time
+import utils
+from test_case import TestCase
+from settings import HEADER_SIZE
+from virt_common import VM
+from packet import Packet,send_packets,save_packets
+
+
+class TestVhostUserOneCopyOneVm(TestCase):
+
+    def set_up_all(self):
+        # Get and verify the ports
+        self.dut_ports = self.dut.get_ports()
+        self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
+        local_port = self.tester.get_local_port(self.dut_ports[0])
+        self.tx_interface = self.tester.get_interface(local_port)
+        # Get the port's socket
+        self.pf = self.dut_ports[0]
+        netdev = self.dut.ports_info[self.pf]['port']
+        self.socket = netdev.get_nic_socket()
+        self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.socket)
+        self.verify(len(self.cores) >= 3, "Insufficient cores for speed testing")
+
+        self.queue_number = 2
+        # Using file to save the vhost sample output since in jumboframe case,
+        # there will be lots of output
+
+        self.virtio1 = "eth1"
+        self.virtio1_mac = "52:54:00:00:00:01"
+        self.vm_dut = None
+
+        self.number_of_ports = 1
+        self.header_row = ["FrameSize(B)", "Throughput(Mpps)", "LineRate(%)", "Cycle"]
+        self.memory_channel = self.dut.get_memory_channels()
+
+    def set_up(self):
+        """
+        Run before each test case.
+        """
+        self.dut.send_expect("rm -rf ./vhost.out", "#")
+        self.dut.send_expect("rm -rf ./vhost-net*", "#")
+        self.dut.send_expect("killall -s INT vhost-switch", "#")
+
+        self.frame_sizes = [64, 128, 256, 512, 1024, 1500]
+        self.vm_testpmd_vector = self.target + "/app/testpmd -c %s -n 3" + \
+                                 " -- -i --tx-offloads=0x0 " + \
+                                 " --rxq=%d --txq=%d --rss-ip --nb-cores=2" % (self.queue_number, self.queue_number)
+
+    def launch_testpmd(self, queue=2):
+        """
+        Launch the vhost sample with different parameters
+        """
+        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem 1024,1024" + \
+                       " --vdev 'net_vhost0,iface=vhost-net,queues=%d' -- -i --rxq=%d --txq=%d --nb-cores=2"
+        self.coremask = utils.create_mask(self.cores)
+        testcmd_start = testcmd % (self.coremask, self.memory_channel, queue, queue, queue)
+
+        self.dut.send_expect(testcmd_start, "testpmd> ", 120)
+        self.dut.send_expect("set fwd mac", "testpmd> ", 120)
+        self.dut.send_expect("start", "testpmd> ", 120)
+
+    def start_onevm(self, path=""):
+        """
+        Start One VM with one virtio device
+        """
+        self.vm = VM(self.dut, 'vm0', 'vhost_sample')
+        if(path != ""):
+            self.vm.set_qemu_emulator(path)
+        vm_params = {}
+        vm_params['driver'] = 'vhost-user'
+        vm_params['opt_path'] = './vhost-net'
+        vm_params['opt_mac'] = self.virtio1_mac
+        vm_params['opt_queue'] = self.queue_number
+        vm_params['opt_settings'] = 'mrg_rxbuf=on,mq=on,vectors=%d' % (2*self.queue_number + 2)
+
+        self.vm.set_vm_device(**vm_params)
+
+        try:
+            self.vm_dut = self.vm.start()
+            if self.vm_dut is None:
+                raise Exception("Set up VM ENV failed")
+        except Exception as e:
+            self.logger.error("ERROR: Failure for %s" % str(e))
+
+        return True
+
+    def get_vm_coremask(self):
+        """
+        Get the vm coremask
+        """
+        cores = self.vm_dut.get_core_list("1S/3C/1T")
+        self.verify(len(cores) >= 3, "Insufficient cores for speed testing, add the cpu number in cfg file.")
+        self.vm_coremask = utils.create_mask(cores)
+        
+    def send_performance(self, case, frame_sizes, tag="Performance"):
+        """
+        Verify the testpmd can recive and forward the data
+        """
+        self.result_table_create(self.header_row)
+        for frame_size in frame_sizes:
+            info = "Running test %s, and %d frame size." % (case, frame_size)
+            self.logger.info(info)
+            payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
+            tgenInput = []
+
+            pkt1 = Packet()
+            pkt1.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt1.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.1'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt2 = Packet()
+            pkt2.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt2.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.20'}),
+                              ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt3 = Packet()
+            pkt3.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt3.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.7'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt4 = Packet()
+            pkt4.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt4.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.8'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+
+            pkt = [pkt1, pkt2, pkt3, pkt4]
+            save_packets(pkt, "/root/multiqueue_2.pcap")
+
+            port = self.tester.get_local_port(self.pf)
+            tgenInput.append((port, port, "multiqueue_2.pcap"))
+
+            _, pps = self.tester.traffic_generator_throughput(tgenInput, delay=30)
+            Mpps = pps / 1000000.0
+            pct = Mpps * 100 / float(self.wirespeed(self.nic, frame_size,
+                                     self.number_of_ports))
+            data_row = [frame_size, str(Mpps), str(pct), tag]
+            self.result_table_add(data_row)
+            self.verify(Mpps != 0, "The recive data of frame-size: %d is 0" % frame_size)
+        self.result_table_print()
+    
+    def send_scapy(self, verify_type):
+        """
+        Verify the virtio-pmd can recive the data before/after change queue size
+        """
+        for frame_size in self.frame_sizes:
+            self.dut.send_expect("clear port stats all", "testpmd> ", 120)
+            payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
+
+
+            pkt1 = Packet()
+            pkt1.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt1.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.1'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt2 = Packet()
+            pkt2.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt2.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.20'}),
+                              ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt3 = Packet()
+            pkt3.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt3.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.7'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt4 = Packet()
+            pkt4.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt4.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.8'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+
+            pkt = [pkt1, pkt2, pkt3, pkt4]*10
+            send_packets(self.tx_interface, pkt)
+
+            out = self.dut.send_expect("show port stats 0", "testpmd> ", 120)
+            print out
+            rx_packet = re.search("RX-packets:\s*(\d*)", out)
+            rx_num = int(rx_packet.group(1))
+            tx_packet = re.search("TX-packets:\s*(\d*)", out)
+            tx_num = int(tx_packet.group(1))
+            if verify_type == "vhost queue = virtio queue" or verify_type == "vhost queue < virtio queue" :
+                verify_rx_num = 40
+                verify_tx_num = 40
+            elif verify_type == "vhost queue > virtio queue":
+                verify_rx_num = 40
+                verify_tx_num = 10
+
+            self.verify(rx_num >= verify_rx_num and tx_num >= verify_tx_num, 
+                        "The rx or tx lost some packets of frame-size:%d" % frame_size)
+
+    def test_perf_pvp_multiqemu_mergeable_pmd(self):
+        """
+        Test the performance for mergeable path
+        """
+        self.launch_testpmd()
+        self.start_onevm()
+        self.get_vm_coremask()
+
+        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+        
+        self.dut.send_expect("stop", "testpmd> ", 120)
+        self.dut.send_expect("start", "testpmd> ", 120)
+        time.sleep(5)
+        self.send_performance(self.running_case, self.frame_sizes, "Virtio 0.95 Mergeable Multiqueue Performance")
+        self.vm_dut.kill_all()
+
+    def test_dynamic_change_virtio_queue_size(self):
+        """
+        Test the performance for change vritio queue size
+        """
+        self.launch_testpmd()
+        self.start_onevm()
+        self.vm_testpmd_queue_1 = self.target + "/app/testpmd -c %s -n 3" + \
+                                  " -- -i --tx-offloads=0x0 " + \
+                                  " --rxq=1 --txq=1 --rss-ip --nb-cores=1"
+        self.get_vm_coremask()
+        self.vm_dut.send_expect(self.vm_testpmd_queue_1 % self.vm_coremask, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.dut.send_expect("clear port stats all", "testpmd> ", 120)
+        self.send_scapy("vhost queue > virtio queue")
+
+        self.vm_dut.send_expect("stop", "testpmd>", 20)
+        self.vm_dut.send_expect("port stop all", "testpmd>")
+        self.vm_dut.send_expect("port config all rxq 2", "testpmd>", 20)
+        self.vm_dut.send_expect("port config all txq 2", "testpmd>")
+        self.vm_dut.send_expect("port start all", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.dut.send_expect("stop", "testpmd> ", 120)
+        self.dut.send_expect("start", "testpmd> ", 120)
+
+        self.dut.send_expect("clear port stats all", "testpmd> ", 120)
+        self.send_scapy("vhost queue = virtio queue")
+
+        self.vm_dut.kill_all()
+        self.dut.send_expect("quit", "# ", 120)
+
+    def test_dynamic_change_vhost_queue_size(self):
+        """
+        Test the performance for change vhost queue size
+        """
+        self.queue_number = 2
+        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem 1024,1024" + \
+                       " --vdev 'net_vhost0,iface=vhost-net,queues=2' -- -i --rxq=1 --txq=1 --nb-cores=1"
+        self.coremask = utils.create_mask(self.cores)
+        testcmd_start = testcmd % (self.coremask, self.memory_channel)
+
+        self.dut.send_expect(testcmd_start, "testpmd> ", 120)
+        self.dut.send_expect("set fwd mac", "testpmd> ", 120)
+        self.dut.send_expect("start", "testpmd> ", 120)
+
+        self.start_onevm()
+
+        self.get_vm_coremask()
+        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+        self.dut.send_expect("clear port stats all", "testpmd> ", 120)
+
+        self.send_scapy("vhost queue < virtio queue")
+
+        self.dut.send_expect("stop", "testpmd>", 20)
+        self.dut.send_expect("port stop all", "testpmd>")
+        self.dut.send_expect("port config all rxq 2", "testpmd>", 20)
+        self.dut.send_expect("port config all txq 2", "testpmd>")
+        self.dut.send_expect("port start all", "testpmd>", 20)
+        self.dut.send_expect("start", "testpmd>")
+        self.dut.send_expect("clear port stats all", "testpmd>")
+
+        self.send_scapy("vhost queue = virtio queue")
+
+        self.vm_dut.kill_all()
+        self.dut.send_expect("quit", "# ", 120)
+
+    def tear_down(self):
+        """
+        Run after each test case.
+        Clear vhost-switch and qemu to avoid blocking the following TCs
+        """
+        self.vm.stop()
+        time.sleep(2)
+
+    def tear_down_all(self):
+        """
+        Run after each test suite.
+        """
+        pass
-- 
2.7.4

* Re: [dts] [PATCH V1] Add test suite about vhost multi queue qemu
  2018-03-15  8:38 [dts] [PATCH V1] Add test suite about vhost multi queue qemu lihong
@ 2018-03-19  8:34 ` Liu, Yong
  0 siblings, 0 replies; 8+ messages in thread
From: Liu, Yong @ 2018-03-19  8:34 UTC (permalink / raw)
  To: Ma, LihongX, dts; +Cc: Ma, LihongX

Thanks, lihong. Some comments are inline.

> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of lihong
> Sent: Tuesday, April 21, 2020 2:49 AM
> To: dts@dpdk.org
> Cc: Ma, LihongX <lihongx.ma@intel.com>
> Subject: [dts] [PATCH V1] Add test suite about vhost multi queue qemu
> 
> Signed-off-by: lihong <lihongx.ma@intel.com>
> ---
>  tests/TestSuite_vhost_multi_queue_qemu.py | 320
> ++++++++++++++++++++++++++++++
>  1 file changed, 320 insertions(+)
>  create mode 100644 tests/TestSuite_vhost_multi_queue_qemu.py
> 
> diff --git a/tests/TestSuite_vhost_multi_queue_qemu.py
> b/tests/TestSuite_vhost_multi_queue_qemu.py
> new file mode 100644
> index 0000000..0654842
> --- /dev/null
> +++ b/tests/TestSuite_vhost_multi_queue_qemu.py
> @@ -0,0 +1,320 @@
> +# BSD LICENSE
> +#
> +# Copyright(c) 2010-2018 Intel Corporation. All rights reserved.
> +# All rights reserved.
> +#
> +# Redistribution and use in source and binary forms, with or without
> +# modification, are permitted provided that the following conditions
> +# are met:
> +#
> +#   * Redistributions of source code must retain the above copyright
> +#     notice, this list of conditions and the following disclaimer.
> +#   * Redistributions in binary form must reproduce the above copyright
> +#     notice, this list of conditions and the following disclaimer in
> +#     the documentation and/or other materials provided with the
> +#     distribution.
> +#   * Neither the name of Intel Corporation nor the names of its
> +#     contributors may be used to endorse or promote products derived
> +#     from this software without specific prior written permission.
> +#
> +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> +
> +"""
> +DPDK Test suite.
> +
> +Vhost PVP performance using Qemu test suite.
> +"""
> +import re
> +import time
> +import utils
> +from test_case import TestCase
> +from settings import HEADER_SIZE
> +from virt_common import VM
> +from packet import Packet,send_packets,save_packets
> +
> +
> +class TestVhostUserOneCopyOneVm(TestCase):
> +
> +    def set_up_all(self):
> +        # Get and verify the ports
> +        self.dut_ports = self.dut.get_ports()
> +        self.verify(len(self.dut_ports) >= 1, "Insufficient ports for
> testing")
> +        local_port = self.tester.get_local_port(self.dut_ports[0])
> +        self.tx_interface = self.tester.get_interface(local_port)
> +        # Get the port's socket
> +        self.pf = self.dut_ports[0]
> +        netdev = self.dut.ports_info[self.pf]['port']
> +        self.socket = netdev.get_nic_socket()
> +        self.cores = self.dut.get_core_list("1S/3C/1T",
> socket=self.socket)
> +        self.verify(len(self.cores) >= 3, "Insufficient cores for speed
> testing")
> +
> +        self.queue_number = 2
> +        # Using file to save the vhost sample output since in jumboframe
> case,
> +        # there will be lots of output
> +
> +        self.virtio1 = "eth1"
> +        self.virtio1_mac = "52:54:00:00:00:01"
> +        self.vm_dut = None
> +
> +        self.number_of_ports = 1
> +        self.header_row = ["FrameSize(B)", "Throughput(Mpps)",
> "LineRate(%)", "Cycle"]
> +        self.memory_channel = self.dut.get_memory_channels()
> +
> +    def set_up(self):
> +        """
> +        Run before each test case.
> +        """
> +        self.dut.send_expect("rm -rf ./vhost.out", "#")
> +        self.dut.send_expect("rm -rf ./vhost-net*", "#")
> +        self.dut.send_expect("killall -s INT vhost-switch", "#")
> +
> +        self.frame_sizes = [64, 128, 256, 512, 1024, 1500]
> +        self.vm_testpmd_vector = self.target + "/app/testpmd -c %s -n 3"
> + \
> +                                 " -- -i --tx-offloads=0x0 " + \
> +                                 " --rxq=%d --txq=%d --rss-ip --nb-
> cores=2" % (self.queue_number, self.queue_number)
> +
> +    def launch_testpmd(self, queue=2):
> +        """
> +        Launch the vhost sample with different parameters
> +        """
> +        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem
> 1024,1024" + \
> +                       " --vdev 'net_vhost0,iface=vhost-net,queues=%d' --
> -i --rxq=%d --txq=%d --nb-cores=2"
> +        self.coremask = utils.create_mask(self.cores)
> +        testcmd_start = testcmd % (self.coremask, self.memory_channel,
> queue, queue, queue)
> +
> +        self.dut.send_expect(testcmd_start, "testpmd> ", 120)
> +        self.dut.send_expect("set fwd mac", "testpmd> ", 120)
> +        self.dut.send_expect("start", "testpmd> ", 120)
> +
> +    def start_onevm(self, path=""):
> +        """
> +        Start One VM with one virtio device
> +        """

It looks like the 'path' argument is never used in this suite. A better approach for changing the qemu path is to take it from the configuration file.
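For example, the emulator path could be carried in the VM/suite cfg rather than in the code. A rough sketch of such an entry (the section and option names here are only illustrative and depend on the DTS config format in use):

    [vm0]
    qemu =
        path=/usr/local/bin/qemu-system-x86_64;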

> +        self.vm = VM(self.dut, 'vm0', 'vhost_sample')
> +        if(path != ""):
> +            self.vm.set_qemu_emulator(path)
> +        vm_params = {}
> +        vm_params['driver'] = 'vhost-user'
> +        vm_params['opt_path'] = './vhost-net'
> +        vm_params['opt_mac'] = self.virtio1_mac
> +        vm_params['opt_queue'] = self.queue_number
> +        vm_params['opt_settings'] = 'mrg_rxbuf=on,mq=on,vectors=%d' %
> (2*self.queue_number + 2)
> +
> +        self.vm.set_vm_device(**vm_params)
> +
> +        try:
> +            self.vm_dut = self.vm.start()
> +            if self.vm_dut is None:
> +                raise Exception("Set up VM ENV failed")
> +        except Exception as e:
> +            self.logger.error("ERROR: Failure for %s" % str(e))
> +
> +        return True
> +
> +    def get_vm_coremask(self):
> +        """
> +        Get the vm coremask
> +        """
> +        cores = self.vm_dut.get_core_list("1S/3C/1T")
> +        self.verify(len(cores) >= 3, "Insufficient cores for speed
> testing, add the cpu number in cfg file.")
> +        self.vm_coremask = utils.create_mask(cores)
> +
> +    def send_performance(self, case, frame_sizes, tag="Performance"):
> +        """
> +        Verify the testpmd can recive and forward the data
> +        """

The function name looks strange to me; you can use the name 'vhost_performance' if you are measuring vhost performance.
The 'tag' argument also looks unnecessary; you can just use a hard-coded title for the performance table.
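Something along these lines (sketch only), with the title hard-coded in the result row:

    def vhost_performance(self):
        """
        Measure the vhost PVP forwarding performance for each frame size.
        """
        # body unchanged from send_performance(), iterating self.frame_sizes,
        # but with the table tag hard-coded:
        data_row = [frame_size, str(Mpps), str(pct), "Mergeable Multiqueue Performance"]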

> +        self.result_table_create(self.header_row)
> +        for frame_size in frame_sizes:
> +            info = "Running test %s, and %d frame size." % (case,
> frame_size)
> +            self.logger.info(info)
> +            payload_size = frame_size - HEADER_SIZE['eth'] -
> HEADER_SIZE['ip'] - HEADER_SIZE['udp']
> +            tgenInput = []
> +
> +            pkt1 = Packet()
> +            pkt1.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
> +            pkt1.config_layers([('ether', {'dst': '%s' %
> self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.1'}),
> +                               ('udp', {'src': 4789, 'dst': 4789}),
> ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
> +            pkt2 = Packet()
> +            pkt2.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
> +            pkt2.config_layers([('ether', {'dst': '%s' %
> self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.20'}),
> +                              ('udp', {'src': 4789, 'dst': 4789}), ('raw',
> {'payload': ['01'] * int('%d' % payload_size)})])
> +            pkt3 = Packet()
> +            pkt3.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
> +            pkt3.config_layers([('ether', {'dst': '%s' %
> self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.7'}),
> +                               ('udp', {'src': 4789, 'dst': 4789}),
> ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
> +            pkt4 = Packet()
> +            pkt4.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
> +            pkt4.config_layers([('ether', {'dst': '%s' %
> self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.8'}),
> +                               ('udp', {'src': 4789, 'dst': 4789}),
> ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
> +
> +            pkt = [pkt1, pkt2, pkt3, pkt4]
> +            save_packets(pkt, "/root/multiqueue_2.pcap")
> +
> +            port = self.tester.get_local_port(self.pf)
> +            tgenInput.append((port, port, "multiqueue_2.pcap"))
> +
> +            _, pps = self.tester.traffic_generator_throughput(tgenInput,
> delay=30)	
> +            Mpps = pps / 1000000.0
> +            pct = Mpps * 100 / float(self.wirespeed(self.nic, frame_size,
> +                                     self.number_of_ports))
> +            data_row = [frame_size, str(Mpps), str(pct), tag]
> +            self.result_table_add(data_row)	
> +            self.verify(Mpps != 0, "The recive data of frame-size: %d is
> 0" % frame_size)

Typo here: "recive" should be "receive".

> +        self.result_table_print()
> +
> +    def send_scapy(self, verify_type):
> +        """
> +        Verify the virtio-pmd can recive the data before/after change
> queue size
> +        """

The function name does not describe what the function does; you can rename it to something like "send_and_verify".
It would also be better to describe all of the verify_type values in the function description.
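For example, something like:

    def send_and_verify(self, verify_type):
        """
        Send packets from the tester and verify the forwarded counts on vhost.
        verify_type is one of:
          "vhost queue = virtio queue": all packets should be received and forwarded
          "vhost queue < virtio queue": all packets should be received and forwarded
          "vhost queue > virtio queue": at least one set of packets should be forwarded
        """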

> +        for frame_size in self.frame_sizes:
> +            self.dut.send_expect("clear port stats all", "testpmd> ", 120)
> +            payload_size = frame_size - HEADER_SIZE['eth'] -
> HEADER_SIZE['ip'] - HEADER_SIZE['udp']
> +
> +
> +            pkt1 = Packet()
> +            pkt1.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
> +            pkt1.config_layers([('ether', {'dst': '%s' %
> self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.1'}),
> +                               ('udp', {'src': 4789, 'dst': 4789}),
> ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
> +            pkt2 = Packet()
> +            pkt2.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
> +            pkt2.config_layers([('ether', {'dst': '%s' %
> self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.20'}),
> +                              ('udp', {'src': 4789, 'dst': 4789}), ('raw',
> {'payload': ['01'] * int('%d' % payload_size)})])
> +            pkt3 = Packet()
> +            pkt3.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
> +            pkt3.config_layers([('ether', {'dst': '%s' %
> self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.7'}),
> +                               ('udp', {'src': 4789, 'dst': 4789}),
> ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
> +            pkt4 = Packet()
> +            pkt4.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
> +            pkt4.config_layers([('ether', {'dst': '%s' %
> self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.8'}),
> +                               ('udp', {'src': 4789, 'dst': 4789}),
> ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
> +
> +            pkt = [pkt1, pkt2, pkt3, pkt4]*10
Spaces are needed around "*"; you can check the code style with the pep8 command.
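For example:

    pep8 tests/TestSuite_vhost_multi_queue_qemu.py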

> +            send_packets(self.tx_interface, pkt)
> +
> +            out = self.dut.send_expect("show port stats 0", "testpmd> ",
> 120)
> +            print out
> +            rx_packet = re.search("RX-packets:\s*(\d*)", out)
> +            rx_num = int(rx_packet.group(1))
> +            tx_packet = re.search("TX-packets:\s*(\d*)", out)
> +            tx_num = int(tx_packet.group(1))
> +            if verify_type == "vhost queue = virtio queue" or verify_type
> == "vhost queue < virtio queue" :
> +                verify_rx_num = 40
> +                verify_tx_num = 40
> +            elif verify_type == "vhost queue > virtio queue":
> +                verify_rx_num = 40
> +                verify_tx_num = 10
> +

Shouldn't the tx and rx numbers be equal?

> +            self.verify(rx_num >= verify_rx_num and tx_num >=
> verify_tx_num,
> +                        "The rx or tx lost some packets of frame-
> size:%d" % frame_size)
> +
> +    def test_perf_pvp_multiqemu_mergeable_pmd(self):
> +        """
> +        Test the performance for mergeable path
> +        """
> +        self.launch_testpmd()
> +        self.start_onevm()
> +        self.get_vm_coremask()
> +
> +        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask,
> "testpmd>", 20)
> +        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +
> +        self.dut.send_expect("stop", "testpmd> ", 120)
> +        self.dut.send_expect("start", "testpmd> ", 120)
> +        time.sleep(5)
> +        self.send_performance(self.running_case, self.frame_sizes,
> "Virtio 0.95 Mergeable Multiqueue Performance")
> +        self.vm_dut.kill_all()
> +
> +    def test_dynamic_change_virtio_queue_size(self):
> +        """
> +        Test the performance for change vritio queue size
> +        """
> +        self.launch_testpmd()
> +        self.start_onevm()
> +        self.vm_testpmd_queue_1 = self.target + "/app/testpmd -c %s -n 3"
> + \
> +                                  " -- -i --tx-offloads=0x0 " + \
> +                                  " --rxq=1 --txq=1 --rss-ip --nb-
> cores=1"
> +        self.get_vm_coremask()
> +        self.vm_dut.send_expect(self.vm_testpmd_queue_1 %
> self.vm_coremask, "testpmd>", 20)
> +        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +
> +        self.dut.send_expect("clear port stats all", "testpmd> ", 120)
> +        self.send_scapy("vhost queue > virtio queue")
> +
> +        self.vm_dut.send_expect("stop", "testpmd>", 20)
> +        self.vm_dut.send_expect("port stop all", "testpmd>")
> +        self.vm_dut.send_expect("port config all rxq 2", "testpmd>", 20)
> +        self.vm_dut.send_expect("port config all txq 2", "testpmd>")
> +        self.vm_dut.send_expect("port start all", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +
> +        self.dut.send_expect("stop", "testpmd> ", 120)
> +        self.dut.send_expect("start", "testpmd> ", 120)
> +
> +        self.dut.send_expect("clear port stats all", "testpmd> ", 120)
> +        self.send_scapy("vhost queue = virtio queue")
> +
> +        self.vm_dut.kill_all()
> +        self.dut.send_expect("quit", "# ", 120)
> +
> +    def test_dynamic_change_vhost_queue_size(self):
> +        """
> +        Test the performance for change vhost queue size
> +        """
> +        self.queue_number = 2
> +        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem
> 1024,1024" + \
> +                       " --vdev 'net_vhost0,iface=vhost-net,queues=2' --
> -i --rxq=1 --txq=1 --nb-cores=1"
> +        self.coremask = utils.create_mask(self.cores)
> +        testcmd_start = testcmd % (self.coremask, self.memory_channel)
> +
> +        self.dut.send_expect(testcmd_start, "testpmd> ", 120)
> +        self.dut.send_expect("set fwd mac", "testpmd> ", 120)
> +        self.dut.send_expect("start", "testpmd> ", 120)
> +
> +        self.start_onevm()
> +
> +        self.get_vm_coremask()
> +        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask,
> "testpmd>", 20)
> +        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +        self.dut.send_expect("clear port stats all", "testpmd> ", 120)
> +
> +        self.send_scapy("vhost queue < virtio queue")
> +
> +        self.dut.send_expect("stop", "testpmd>", 20)
> +        self.dut.send_expect("port stop all", "testpmd>")
> +        self.dut.send_expect("port config all rxq 2", "testpmd>", 20)
> +        self.dut.send_expect("port config all txq 2", "testpmd>")
> +        self.dut.send_expect("port start all", "testpmd>", 20)
> +        self.dut.send_expect("start", "testpmd>")
> +        self.dut.send_expect("clear port stats all", "testpmd>")
> +
> +        self.send_scapy("vhost queue = virtio queue")
> +
> +        self.vm_dut.kill_all()
> +        self.dut.send_expect("quit", "# ", 120)
> +
> +    def tear_down(self):
> +        """
> +        Run after each test case.
> +        Clear vhost-switch and qemu to avoid blocking the following TCs
> +        """
> +        self.vm.stop()
> +        time.sleep(2)
> +
> +    def tear_down_all(self):
> +        """
> +        Run after each test suite.
> +        """
> +        pass
> --
> 2.7.4

* Re: [dts] [PATCH V1] Add test suite about vhost multi queue qemu
  2018-03-20  8:41 lihong
@ 2018-03-23 10:10 ` Liu, Yong
  0 siblings, 0 replies; 8+ messages in thread
From: Liu, Yong @ 2018-03-23 10:10 UTC (permalink / raw)
  To: lihong, dts

Thanks, lihong. Applied.

On 04/26/2020 02:52 AM, lihong wrote:
> Signed-off-by: lihong<lihongx.ma@intel.com>

* [dts] [PATCH V1] Add test suite about vhost multi queue qemu
@ 2018-03-20  8:41 lihong
  2018-03-23 10:10 ` Liu, Yong
  0 siblings, 1 reply; 8+ messages in thread
From: lihong @ 2018-03-20  8:41 UTC (permalink / raw)
  To: dts; +Cc: lihong

Signed-off-by: lihong <lihongx.ma@intel.com>
---
 tests/TestSuite_vhost_multi_queue_qemu.py | 322 ++++++++++++++++++++++++++++++
 1 file changed, 322 insertions(+)
 create mode 100644 tests/TestSuite_vhost_multi_queue_qemu.py

diff --git a/tests/TestSuite_vhost_multi_queue_qemu.py b/tests/TestSuite_vhost_multi_queue_qemu.py
new file mode 100644
index 0000000..769fb5d
--- /dev/null
+++ b/tests/TestSuite_vhost_multi_queue_qemu.py
@@ -0,0 +1,322 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2018 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Intel Corporation nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+DPDK Test suite.
+
+Vhost PVP performance using Qemu test suite.
+"""
+import re
+import time
+import utils
+from test_case import TestCase
+from settings import HEADER_SIZE
+from virt_common import VM
+from packet import Packet, send_packets, save_packets
+
+
+class TestVhostUserOneCopyOneVm(TestCase):
+
+    def set_up_all(self):
+        # Get and verify the ports
+        self.dut_ports = self.dut.get_ports()
+        self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
+        local_port = self.tester.get_local_port(self.dut_ports[0])
+        self.tx_interface = self.tester.get_interface(local_port)
+        # Get the port's socket
+        self.pf = self.dut_ports[0]
+        netdev = self.dut.ports_info[self.pf]['port']
+        self.socket = netdev.get_nic_socket()
+        self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.socket)
+        self.verify(len(self.cores) >= 3, "Insufficient cores for speed testing")
+
+        self.queue_number = 2
+        # Using file to save the vhost sample output since in jumboframe case,
+        # there will be lots of output
+
+        self.virtio1 = "eth1"
+        self.virtio1_mac = "52:54:00:00:00:01"
+        self.vm_dut = None
+
+        self.number_of_ports = 1
+        self.header_row = ["FrameSize(B)", "Throughput(Mpps)", "LineRate(%)", "Cycle"]
+        self.memory_channel = self.dut.get_memory_channels()
+
+    def set_up(self):
+        """
+        Run before each test case.
+        """
+        self.dut.send_expect("rm -rf ./vhost.out", "#")
+        self.dut.send_expect("rm -rf ./vhost-net*", "#")
+        self.dut.send_expect("killall -s INT vhost-switch", "#")
+
+        self.frame_sizes = [64, 128, 256, 512, 1024, 1500]
+        self.vm_testpmd_vector = self.target + "/app/testpmd -c %s -n 3" + \
+                                 " -- -i --tx-offloads=0x0 " + \
+                                 " --rxq=%d --txq=%d --rss-ip --nb-cores=2" % (self.queue_number, self.queue_number)
+
+    def launch_testpmd(self):
+        """
+        Launch the vhost sample with different parameters
+        """
+        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem 1024,1024" + \
+                       " --vdev 'net_vhost0,iface=vhost-net,queues=%d' -- -i --rxq=%d --txq=%d --nb-cores=2"
+        self.coremask = utils.create_mask(self.cores)
+        testcmd_start = testcmd % (self.coremask, self.memory_channel, self.queue_number, self.queue_number, self.queue_number)
+
+        self.dut.send_expect(testcmd_start, "testpmd> ", 120)
+        self.dut.send_expect("set fwd mac", "testpmd> ", 120)
+        self.dut.send_expect("start", "testpmd> ", 120)
+
+    def start_onevm(self):
+        """
+        Start One VM with one virtio device
+        """
+        self.vm = VM(self.dut, 'vm0', 'vhost_sample')
+        vm_params = {}
+        vm_params['driver'] = 'vhost-user'
+        vm_params['opt_path'] = './vhost-net'
+        vm_params['opt_mac'] = self.virtio1_mac
+        vm_params['opt_queue'] = self.queue_number
+        vm_params['opt_settings'] = 'mrg_rxbuf=on,mq=on,vectors=%d' % (2*self.queue_number + 2)
+
+        self.vm.set_vm_device(**vm_params)
+
+        try:
+            self.vm_dut = self.vm.start()
+            if self.vm_dut is None:
+                raise Exception("Set up VM ENV failed")
+        except Exception as e:
+            self.logger.error("ERROR: Failure for %s" % str(e))
+
+        return True
+
+    def get_vm_coremask(self):
+        """
+        Get the vm coremask
+        """
+        cores = self.vm_dut.get_core_list("1S/3C/1T")
+        self.verify(len(cores) >= 3, "Insufficient cores for speed testing, add the cpu number in cfg file.")
+        self.vm_coremask = utils.create_mask(cores)
+
+    def vhost_performance(self):
+        """
+        Verify the testpmd can recive and forward the data
+        """
+        self.result_table_create(self.header_row)
+        for frame_size in self.frame_sizes:
+            info = "Running test %s, and %d frame size." % (self.running_case, frame_size)
+            self.logger.info(info)
+            payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
+            tgenInput = []
+
+            pkt1 = Packet()
+            pkt1.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt1.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.1'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt2 = Packet()
+            pkt2.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt2.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.20'}),
+                              ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt3 = Packet()
+            pkt3.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt3.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.7'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt4 = Packet()
+            pkt4.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt4.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.8'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+
+            pkt = [pkt1, pkt2, pkt3, pkt4]
+            save_packets(pkt, "/root/multiqueue_2.pcap")
+
+            port = self.tester.get_local_port(self.pf)
+            tgenInput.append((port, port, "multiqueue_2.pcap"))
+
+            _, pps = self.tester.traffic_generator_throughput(tgenInput, delay=30)
+            Mpps = pps / 1000000.0
+            pct = Mpps * 100 / float(self.wirespeed(self.nic, frame_size,
+                                     self.number_of_ports))
+            data_row = [frame_size, str(Mpps), str(pct), "Mergeable Multiqueue Performance"]
+            self.result_table_add(data_row)
+            self.verify(Mpps != 0, "The receive data of frame-size: %d is 0" % frame_size)
+        self.result_table_print()
+
+    def send_and_verify(self, verify_type):
+        """
+        Verify the virtio-pmd can recive the data before/after change queue size
+        While verify_type is "vhost queue = virtio queue", the vhost should forward all set of data
+        While verify_type is "vhost queue < virtio queue", the vhost should forward all set of data
+        While verify_type is "vhost queue > virtio queue", the vhost should forward at least one set of data
+        """
+        for frame_size in self.frame_sizes:
+            info = "Running test %s, and %d frame size." % (self.running_case, frame_size)
+            self.logger.info(info)
+            self.dut.send_expect("clear port stats all", "testpmd> ", 120)
+            payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
+
+            pkt1 = Packet()
+            pkt1.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt1.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.1'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt2 = Packet()
+            pkt2.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt2.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.20'}),
+                              ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt3 = Packet()
+            pkt3.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt3.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.7'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+            pkt4 = Packet()
+            pkt4.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
+            pkt4.config_layers([('ether', {'dst': '%s' % self.virtio1_mac}), ('ipv4', {'dst': '1.1.1.8'}),
+                               ('udp', {'src': 4789, 'dst': 4789}), ('raw', {'payload': ['01'] * int('%d' % payload_size)})])
+
+            pkt = [pkt1, pkt2, pkt3, pkt4] * 10
+            send_packets(self.tx_interface, pkt)
+
+            out = self.dut.send_expect("show port stats 0", "testpmd> ", 120)
+            print out
+            rx_packet = re.search("RX-packets:\s*(\d*)", out)
+            rx_num = int(rx_packet.group(1))
+            tx_packet = re.search("TX-packets:\s*(\d*)", out)
+            tx_num = int(tx_packet.group(1))
+            if verify_type == "vhost queue = virtio queue" or verify_type == "vhost queue < virtio queue":
+                verify_rx_num = 40
+                verify_tx_num = 40
+            elif verify_type == "vhost queue > virtio queue":
+                verify_rx_num = 40
+                verify_tx_num = 10
+
+            self.verify(rx_num >= verify_rx_num and tx_num >= verify_tx_num,
+                        "The rx or tx lost some packets of frame-size:%d" % frame_size)
+
+    def test_perf_pvp_multiqemu_mergeable_pmd(self):
+        """
+        Test the performance for mergeable path
+        """
+        self.launch_testpmd()
+        self.start_onevm()
+        self.get_vm_coremask()
+
+        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.dut.send_expect("stop", "testpmd> ", 120)
+        self.dut.send_expect("start", "testpmd> ", 120)
+        time.sleep(5)
+        self.vhost_performance()
+        self.vm_dut.kill_all()
+
+    def test_dynamic_change_virtio_queue_size(self):
+        """
+        Test the performance for change vritio queue size
+        """
+        self.launch_testpmd()
+        self.start_onevm()
+        self.vm_testpmd_queue_1 = self.target + "/app/testpmd -c %s -n 3" + \
+                                  " -- -i --tx-offloads=0x0 " + \
+                                  " --rxq=1 --txq=1 --rss-ip --nb-cores=1"
+        self.get_vm_coremask()
+        self.vm_dut.send_expect(self.vm_testpmd_queue_1 % self.vm_coremask, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.dut.send_expect("clear port stats all", "testpmd> ", 120)
+        self.send_and_verify("vhost queue > virtio queue")
+
+        self.vm_dut.send_expect("stop", "testpmd>", 20)
+        self.vm_dut.send_expect("port stop all", "testpmd>")
+        self.vm_dut.send_expect("port config all rxq 2", "testpmd>", 20)
+        self.vm_dut.send_expect("port config all txq 2", "testpmd>")
+        self.vm_dut.send_expect("port start all", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.dut.send_expect("stop", "testpmd> ", 120)
+        self.dut.send_expect("start", "testpmd> ", 120)
+
+        self.dut.send_expect("clear port stats all", "testpmd> ", 120)
+        self.send_and_verify("vhost queue = virtio queue")
+
+        self.vm_dut.kill_all()
+        self.dut.send_expect("quit", "# ", 120)
+
+    def test_dynamic_change_vhost_queue_size(self):
+        """
+        Test the performance for change vhost queue size
+        """
+        self.queue_number = 2
+        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem 1024,1024" + \
+                       " --vdev 'net_vhost0,iface=vhost-net,queues=2' -- -i --rxq=1 --txq=1 --nb-cores=1"
+        self.coremask = utils.create_mask(self.cores)
+        testcmd_start = testcmd % (self.coremask, self.memory_channel)
+
+        self.dut.send_expect(testcmd_start, "testpmd> ", 120)
+        self.dut.send_expect("set fwd mac", "testpmd> ", 120)
+        self.dut.send_expect("start", "testpmd> ", 120)
+
+        self.start_onevm()
+
+        self.get_vm_coremask()
+        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+        self.dut.send_expect("clear port stats all", "testpmd> ", 120)
+
+        self.send_and_verify("vhost queue < virtio queue")
+
+        self.dut.send_expect("stop", "testpmd>", 20)
+        self.dut.send_expect("port stop all", "testpmd>")
+        self.dut.send_expect("port config all rxq 2", "testpmd>", 20)
+        self.dut.send_expect("port config all txq 2", "testpmd>")
+        self.dut.send_expect("port start all", "testpmd>", 20)
+        self.dut.send_expect("start", "testpmd>")
+        self.dut.send_expect("clear port stats all", "testpmd>")
+
+        self.send_and_verify("vhost queue = virtio queue")
+
+        self.vm_dut.kill_all()
+        self.dut.send_expect("quit", "# ", 120)
+
+    def tear_down(self):
+        """
+        Run after each test case.
+        Clear vhost-switch and qemu to avoid blocking the following TCs
+        """
+        self.vm.stop()
+        time.sleep(2)
+
+    def tear_down_all(self):
+        """
+        Run after each test suite.
+        """
+        pass
-- 
2.7.4

* Re: [dts] [PATCH V1] Add test suite about vhost multi queue qemu
  2018-03-09  7:28 lihong
@ 2018-03-09  8:51 ` Liu, Yong
  0 siblings, 0 replies; 8+ messages in thread
From: Liu, Yong @ 2018-03-09  8:51 UTC (permalink / raw)
  To: Ma, LihongX, dts; +Cc: Ma, LihongX

Lihong, 
Some comments are inline.

Thanks,
Marvin

> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of lihong
> Sent: Wednesday, April 15, 2020 1:39 AM
> To: dts@dpdk.org
> Cc: Ma, LihongX <lihongx.ma@intel.com>
> Subject: [dts] [PATCH V1] Add test suite about vhost multi queue qemu
> 
> Signed-off-by: lihong <lihongx.ma@intel.com>
> ---
>  tests/TestSuite_vhost_multi_queue_qemu.py | 300
> ++++++++++++++++++++++++++++++
>  1 file changed, 300 insertions(+)
>  create mode 100644 tests/TestSuite_vhost_multi_queue_qemu.py
> 
> diff --git a/tests/TestSuite_vhost_multi_queue_qemu.py
> b/tests/TestSuite_vhost_multi_queue_qemu.py
> new file mode 100644
> index 0000000..80a02bd
> --- /dev/null
> +++ b/tests/TestSuite_vhost_multi_queue_qemu.py
> @@ -0,0 +1,300 @@
> +# BSD LICENSE
> +#
> +# Copyright(c) 2010-2018 Intel Corporation. All rights reserved.
> +# All rights reserved.
> +#
> +# Redistribution and use in source and binary forms, with or without
> +# modification, are permitted provided that the following conditions
> +# are met:
> +#
> +#   * Redistributions of source code must retain the above copyright
> +#     notice, this list of conditions and the following disclaimer.
> +#   * Redistributions in binary form must reproduce the above copyright
> +#     notice, this list of conditions and the following disclaimer in
> +#     the documentation and/or other materials provided with the
> +#     distribution.
> +#   * Neither the name of Intel Corporation nor the names of its
> +#     contributors may be used to endorse or promote products derived
> +#     from this software without specific prior written permission.
> +#
> +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> +
> +"""
> +DPDK Test suite.
> +
> +Vhost PVP performance using Qemu test suite.
> +"""
> +import re
> +import time
> +import utils
> +from scapy.utils import wrpcap
> +from test_case import TestCase
> +from settings import HEADER_SIZE
> +from virt_common import VM
> +
> +
> +class TestVhostUserOneCopyOneVm(TestCase):
> +
> +    def set_up_all(self):
> +        # Get and verify the ports
> +        self.dut_ports = self.dut.get_ports()
> +        self.verify(len(self.dut_ports) >= 1, "Insufficient ports for
> testing")
> +        local_port = self.tester.get_local_port(self.dut_ports[0])
> +        self.tx_interface = self.tester.get_interface(local_port)
> +        # Get the port's socket
> +        self.pf = self.dut_ports[0]
> +        netdev = self.dut.ports_info[self.pf]['port']
> +        self.socket = netdev.get_nic_socket()
> +        self.cores = self.dut.get_core_list("1S/3C/1T",
> socket=self.socket)
> +        self.verify(self.cores is not None, "Insufficient cores for speed
> testing")

get_core_list() won't return None when the allocated cores are insufficient. You can achieve the same check by verifying the length of self.cores.
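For example:

    self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.socket)
    self.verify(len(self.cores) >= 3, "Insufficient cores for speed testing")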

> +
> +        self.queue_number = 2
> +
> +        # Using file to save the vhost sample output since in jumboframe
> case,
> +        # there will be lots of output
> +
> +        self.virtio1 = "eth1"
> +        self.virtio1_mac = "52:54:00:00:00:01"
> +        self.vm_dut = None
> +
> +        self.number_of_ports = 1
> +        self.header_row = ["FrameSize(B)", "Throughput(Mpps)",
> "LineRate(%)", "Cycle"]
> +        self.memory_channel = self.dut.get_memory_channels()
> +
> +    def set_up(self):
> +        #
> +        # Run before each test case.
> +        #
> +        self.dut.send_expect("rm -rf ./vhost.out", "#")
> +        self.dut.send_expect("rm -rf ./vhost-net*", "#")
> +        self.dut.send_expect("killall -s INT vhost-switch", "#")
> +
> +        self.frame_sizes = [64, 128, 256, 512, 1024, 1500]
> +        self.vm_testpmd_vector = self.target + "/app/testpmd -c %s -n 3"
> + \
> +                                 " -- -i --tx-offloads=0x0 " + \
> +                                 " --rxq=%d --txq=%d --rss-ip --nb-
> cores=2" % (self.queue_number, self.queue_number)
> +
> +    def launch_testpmd(self, queue=2):
> +        #
> +        # Launch the vhost sample with different parameters
> +        #
> +        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem
> 1024,1024" + \
> +                       " --vdev 'net_vhost0,iface=vhost-net,queues=%d' --
> -i --rxq=%d --txq=%d --nb-cores=2"
> +        self.coremask = utils.create_mask(self.cores)
> +        testcmd_start = testcmd % (self.coremask, self.memory_channel,
> queue, queue, queue)
> +
> +        self.vhost_user = self.dut.new_session(suite="user")
> +
> +        self.vhost_user.send_expect("cd /root/dpdk", "#", 120)

The DPDK folder is saved in self.dut.base_dir. I'm not sure why an additional session is needed for starting testpmd;
it looks like the host's default session is not used at all in this suite.
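For example, the vhost testpmd can be launched on the default host session directly:

    self.dut.send_expect(testcmd_start, "testpmd> ", 120)
    self.dut.send_expect("set fwd mac", "testpmd> ", 120)
    self.dut.send_expect("start", "testpmd> ", 120)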

> +        self.vhost_user.send_expect(testcmd_start, "testpmd> ", 120)
> +        self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120)
> +        self.vhost_user.send_expect("start", "testpmd> ", 120)
> +
> +    def start_onevm(self, path=""):
> +        #
> +        # Start One VM with one virtio device
> +        #
Please use """ as separator for function description. 

> +        self.vm = VM(self.dut, 'vm0', 'vhost_sample')
> +        if(path != ""):
> +            self.vm.set_qemu_emulator(path)
> +        vm_params = {}
> +        vm_params['driver'] = 'vhost-user'
> +        vm_params['opt_path'] = './vhost-net'
> +        vm_params['opt_mac'] = self.virtio1_mac
> +        vm_params['opt_queue'] = self.queue_number
> +        vm_params['opt_settings'] = 'mrg_rxbuf=on,mq=on,vectors=6'

Shouldn't vectors be calculated from the queue number?
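For a virtio-net multiqueue device the vector count should follow the queue number
(2 per queue pair plus config/control), e.g.:

    vm_params['opt_settings'] = 'mrg_rxbuf=on,mq=on,vectors=%d' % (2 * self.queue_number + 2)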

> +
> +        self.vm.set_vm_device(**vm_params)
> +
> +        try:
> +            self.vm_dut = self.vm.start()
> +            if self.vm_dut is None:
> +                raise Exception("Set up VM ENV failed")
> +        except Exception as e:
> +            self.logger.error("ERROR: Failure for %s" % str(e))
> +
> +        return True
> +
> +    def get_vm_coremask(self):
> +        #
> +        # Get the vm coremask
> +        #
> +        cores = self.vm_dut.get_core_list("1S/3C/1T")
> +        self.verify(cores is not None, "Insufficient cores for speed
> testing, add the cpu number in cfg file.")


Same comment as previous.

> +        self.vm_coremask = utils.create_mask(cores)
> +
> +    def send_performance(self, case, frame_sizes, tag="Performance"):
> +        #
> +        # Verify the testpmd can recive and forward the data
> +        #
> +        self.result_table_create(self.header_row)
> +        for frame_size in frame_sizes:
> +            info = "Running test %s, and %d frame size." % (case, frame_size)
> +            self.logger.info(info)
> +            payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
> +            tgenInput = []
> +
> +            self.tester.scapy_append('a= [Ether(dst="%s")/IP(dst="1.1.1.1")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +            self.tester.scapy_append('b= [Ether(dst="%s")/IP(dst="1.1.1.20")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +            self.tester.scapy_append('c= [Ether(dst="%s")/IP(dst="1.1.1.7")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +            self.tester.scapy_append('d= [Ether(dst="%s")/IP(dst="1.1.1.8")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +            self.tester.scapy_append('a= a + b + c + d')
> +            self.tester.scapy_append('wrpcap("multiqueue_2.pcap", a)')
> +            self.tester.scapy_execute()
> +
> +            port = self.tester.get_local_port(self.pf)
> +            tgenInput.append((port, port, "multiqueue_2.pcap"))
> +
> +            _, pps = self.tester.traffic_generator_throughput(tgenInput, delay=30)
> +            Mpps = pps / 1000000.0
> +            pct = Mpps * 100 / float(self.wirespeed(self.nic, frame_size,
> +                                     self.number_of_ports))
> +            data_row = [frame_size, str(Mpps), str(pct), tag]
> +            self.result_table_add(data_row)
> +            self.verify(Mpps != 0, "The recive data of frame-size: %d is 0" % frame_size)
> +        self.result_table_print()
> +
> +    def send_scapy(self, verify_type):
> +        #
> +        # Verify the virtio-pmd can recive the data before/after change queue size
> +        #
> +        payload_size = 256 - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
> +        self.tester.scapy_append('pk1 = [Ether(dst="%s")/IP(dst="1.1.1.1")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +        self.tester.scapy_append('pk2 = [Ether(dst="%s")/IP(dst="1.1.1.20")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +        self.tester.scapy_append('pk3 = [Ether(dst="%s")/IP(dst="1.1.1.7")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +        self.tester.scapy_append('pk4 = [Ether(dst="%s")/IP(dst="1.1.1.8")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +        self.tester.scapy_append('pk = pk1 + pk2 + pk3 + pk4')
> +        self.tester.scapy_append('sendp(pk, iface="%s", count=10)' % (self.tx_interface))
> +        self.tester.scapy_execute()


Please use the functions in the packet module for packet transmission.
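Something roughly like the sketch below -- the exact Packet helper names and parameters in packet.py may differ:

    from packet import Packet

    pkt = Packet()
    pkt.assign_layers(['ether', 'ipv4', 'udp', 'raw'])
    pkt.config_layer('ether', {'dst': self.virtio1_mac})
    pkt.config_layer('ipv4', {'dst': '1.1.1.1'})
    pkt.send_pkt(tx_port=self.tx_interface)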

> +
> +        out = self.vhost_user.send_expect("show port stats 0", "testpmd> ", 120)
> +        print out
> +        rx_packet = re.search("RX-packets:\s*(\d*)", out)
> +        rx_num = int(rx_packet.group(1))
> +        tx_packet = re.search("TX-packets:\s*(\d*)", out)
> +        tx_num = int(tx_packet.group(1))
> +        if verify_type == "vhost queue = virtio queue" or verify_type == "vhost queue < virtio queue" :
> +            verify_rx_num = 40
> +            verify_tx_num = 40
> +        elif verify_type == "vhost queue > virtio queue":
> +            verify_rx_num = 40
> +            verify_tx_num = 10
> +
> +        self.verify(rx_num >= verify_rx_num and tx_num >= verify_tx_num, "The vm port lost some packets")
> +
> +    def test_perf_pvp_multiqemu_mergeable_pmd(self):
> +        #
> +        # Test the performance for mergeable path
> +        #
> +        self.launch_testpmd()
> +        self.start_onevm()
> +        self.get_vm_coremask()
> +
> +        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20)
> +        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +
> +        self.vhost_user.send_expect("stop", "testpmd> ", 120)
> +        self.vhost_user.send_expect("start", "testpmd> ", 120)
> +        time.sleep(5)
> +        self.send_performance(self.running_case, self.frame_sizes, "Virtio 0.95 Mergeable Multiqueue Performance")
> +        self.vm_dut.kill_all()
> +
> +    def test_dynamic_change_virtio_queue_size(self):
> +        #
> +        # Test the performance for change vritio queue size
> +        #
> +        self.launch_testpmd()
> +        self.start_onevm()
> +        self.vm_testpmd_queue_1 = self.target + "/app/testpmd -c %s -n 3" + \
> +                                  " -- -i --tx-offloads=0x0 " + \
> +                                  " --rxq=1 --txq=1 --rss-ip --nb-cores=1"
> +        self.get_vm_coremask()
> +        self.vm_dut.send_expect(self.vm_testpmd_queue_1 % self.vm_coremask, "testpmd>", 20)
> +        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +
> +        self.vhost_user.send_expect("clear port stats all", "testpmd> ", 120)
> +        self.send_scapy("vhost queue > virtio queue")
> +
> +        self.vm_dut.send_expect("stop", "testpmd>", 20)
> +        self.vm_dut.send_expect("port stop all", "testpmd>")
> +        self.vm_dut.send_expect("port config all rxq 2", "testpmd>", 20)
> +        self.vm_dut.send_expect("port config all txq 2", "testpmd>")
> +        self.vm_dut.send_expect("port start all", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +
> +        self.vhost_user.send_expect("stop", "testpmd> ", 120)
> +        self.vhost_user.send_expect("start", "testpmd> ", 120)
> +
> +        self.vhost_user.send_expect("clear port stats all", "testpmd> ",
> 120)
> +        self.send_scapy("vhost queue = virtio queue")
> +
> +        self.vm_dut.kill_all()
> +        self.vhost_user.send_expect("quit", "# ", 120)
> +
> +    def test_dynamic_change_vhost_queue_size(self):
> +        #
> +        # Test the performance for change vhost queue size
> +        #
> +        self.queue_number = 2
> +        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem 1024,1024" + \
> +                       " --vdev 'net_vhost0,iface=vhost-net,queues=2' -- -i --rxq=1 --txq=1 --nb-cores=1"
> +        self.coremask = utils.create_mask(self.cores)
> +        testcmd_start = testcmd % (self.coremask, self.memory_channel)
> +
> +        self.vhost_user = self.dut.new_session(suite="user")
> +
> +        self.vhost_user.send_expect("cd /root/dpdk", "#", 120)
> +        self.vhost_user.send_expect(testcmd_start, "testpmd> ", 120)
> +        self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120)
> +        self.vhost_user.send_expect("start", "testpmd> ", 120)
> +
> +        self.start_onevm()
> +
> +        self.get_vm_coremask()
> +        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20)
> +        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +        self.vhost_user.send_expect("clear port stats all", "testpmd> ",
> 120)
> +
> +        self.send_scapy("vhost queue < virtio queue")
> +
> +        self.vhost_user.send_expect("stop", "testpmd>", 20)
> +        self.vhost_user.send_expect("port stop all", "testpmd>")
> +        self.vhost_user.send_expect("port config all rxq 2", "testpmd>",
> 20)
> +        self.vhost_user.send_expect("port config all txq 2", "testpmd>")
> +        self.vhost_user.send_expect("port start all", "testpmd>", 20)
> +        self.vhost_user.send_expect("start", "testpmd>")
> +        self.vhost_user.send_expect("clear port stats all", "testpmd>")
> +
> +        self.send_scapy("vhost queue = virtio queue")
> +
> +        self.vm_dut.kill_all()
> +        self.vhost_user.send_expect("quit", "# ", 120)
> +
> +    def tear_down(self):
> +        #
> +        # Run after each test case.
> +        # Clear vhost-switch and qemu to avoid blocking the following TCs
> +        #
> +        if getattr(self, 'vhost_user', None):
> +            self.dut.close_session(self.vhost_user)
> +        self.vm.stop()
> +        time.sleep(2)
> +
> +    def tear_down_all(self):
> +        """
> +        Run after each test suite.
> +        """
> +        pass
> --
> 2.7.4

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dts] [PATCH V1] Add test suite about vhost multi queue qemu
@ 2018-03-09  7:28 lihong
  2018-03-09  8:51 ` Liu, Yong
  0 siblings, 1 reply; 8+ messages in thread
From: lihong @ 2018-03-09  7:28 UTC (permalink / raw)
  To: dts; +Cc: lihong

Signed-off-by: lihong <lihongx.ma@intel.com>
---
 tests/TestSuite_vhost_multi_queue_qemu.py | 300 ++++++++++++++++++++++++++++++
 1 file changed, 300 insertions(+)
 create mode 100644 tests/TestSuite_vhost_multi_queue_qemu.py

diff --git a/tests/TestSuite_vhost_multi_queue_qemu.py b/tests/TestSuite_vhost_multi_queue_qemu.py
new file mode 100644
index 0000000..80a02bd
--- /dev/null
+++ b/tests/TestSuite_vhost_multi_queue_qemu.py
@@ -0,0 +1,300 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2018 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Intel Corporation nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+DPDK Test suite.
+
+Vhost PVP performance using Qemu test suite.
+"""
+import re
+import time
+import utils
+from scapy.utils import wrpcap
+from test_case import TestCase
+from settings import HEADER_SIZE
+from virt_common import VM
+
+
+class TestVhostUserOneCopyOneVm(TestCase):
+
+    def set_up_all(self):
+        # Get and verify the ports
+        self.dut_ports = self.dut.get_ports()
+        self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
+        local_port = self.tester.get_local_port(self.dut_ports[0])
+        self.tx_interface = self.tester.get_interface(local_port)
+        # Get the port's socket
+        self.pf = self.dut_ports[0]
+        netdev = self.dut.ports_info[self.pf]['port']
+        self.socket = netdev.get_nic_socket()
+        self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.socket)
+        self.verify(self.cores is not None, "Insufficient cores for speed testing")
+
+        self.queue_number = 2
+
+        # Using file to save the vhost sample output since in jumboframe case,
+        # there will be lots of output
+
+        self.virtio1 = "eth1"
+        self.virtio1_mac = "52:54:00:00:00:01"
+        self.vm_dut = None
+
+        self.number_of_ports = 1
+        self.header_row = ["FrameSize(B)", "Throughput(Mpps)", "LineRate(%)", "Cycle"]
+        self.memory_channel = self.dut.get_memory_channels()
+
+    def set_up(self):
+        #
+        # Run before each test case.
+        #
+        self.dut.send_expect("rm -rf ./vhost.out", "#")
+        self.dut.send_expect("rm -rf ./vhost-net*", "#")
+        self.dut.send_expect("killall -s INT vhost-switch", "#")
+
+        self.frame_sizes = [64, 128, 256, 512, 1024, 1500]
+        self.vm_testpmd_vector = self.target + "/app/testpmd -c %s -n 3" + \
+                                 " -- -i --tx-offloads=0x0 " + \
+                                 " --rxq=%d --txq=%d --rss-ip --nb-cores=2" % (self.queue_number, self.queue_number)
+
+    def launch_testpmd(self, queue=2):
+        #
+        # Launch the vhost sample with different parameters
+        #
+        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem 1024,1024" + \
+                       " --vdev 'net_vhost0,iface=vhost-net,queues=%d' -- -i --rxq=%d --txq=%d --nb-cores=2"
+        self.coremask = utils.create_mask(self.cores)
+        testcmd_start = testcmd % (self.coremask, self.memory_channel, queue, queue, queue)
+
+        self.vhost_user = self.dut.new_session(suite="user")
+
+        self.vhost_user.send_expect("cd /root/dpdk", "#", 120)
+        self.vhost_user.send_expect(testcmd_start, "testpmd> ", 120)
+        self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120)
+        self.vhost_user.send_expect("start", "testpmd> ", 120)
+
+    def start_onevm(self, path=""):
+        #
+        # Start One VM with one virtio device
+        #
+        self.vm = VM(self.dut, 'vm0', 'vhost_sample')
+        if(path != ""):
+            self.vm.set_qemu_emulator(path)
+        vm_params = {}
+        vm_params['driver'] = 'vhost-user'
+        vm_params['opt_path'] = './vhost-net'
+        vm_params['opt_mac'] = self.virtio1_mac
+        vm_params['opt_queue'] = self.queue_number
+        vm_params['opt_settings'] = 'mrg_rxbuf=on,mq=on,vectors=6'
+
+        self.vm.set_vm_device(**vm_params)
+
+        try:
+            self.vm_dut = self.vm.start()
+            if self.vm_dut is None:
+                raise Exception("Set up VM ENV failed")
+        except Exception as e:
+            self.logger.error("ERROR: Failure for %s" % str(e))
+
+        return True
+
+    def get_vm_coremask(self):
+        #
+        # Get the vm coremask
+        #
+        cores = self.vm_dut.get_core_list("1S/3C/1T")
+        self.verify(cores is not None, "Insufficient cores for speed testing, add the cpu number in cfg file.")
+        self.vm_coremask = utils.create_mask(cores)
+        
+    def send_performance(self, case, frame_sizes, tag="Performance"):
+        #
+        # Verify the testpmd can recive and forward the data
+        #
+        self.result_table_create(self.header_row)
+        for frame_size in frame_sizes:
+            info = "Running test %s, and %d frame size." % (case, frame_size)
+            self.logger.info(info)
+            payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
+            tgenInput = []
+
+            self.tester.scapy_append('a= [Ether(dst="%s")/IP(dst="1.1.1.1")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+            self.tester.scapy_append('b= [Ether(dst="%s")/IP(dst="1.1.1.20")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+            self.tester.scapy_append('c= [Ether(dst="%s")/IP(dst="1.1.1.7")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+            self.tester.scapy_append('d= [Ether(dst="%s")/IP(dst="1.1.1.8")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+            self.tester.scapy_append('a= a + b + c + d')
+            self.tester.scapy_append('wrpcap("multiqueue_2.pcap", a)')
+            self.tester.scapy_execute()
+
+            port = self.tester.get_local_port(self.pf)
+            tgenInput.append((port, port, "multiqueue_2.pcap"))
+
+            _, pps = self.tester.traffic_generator_throughput(tgenInput, delay=30)
+            Mpps = pps / 1000000.0
+            pct = Mpps * 100 / float(self.wirespeed(self.nic, frame_size,
+                                     self.number_of_ports))
+            data_row = [frame_size, str(Mpps), str(pct), tag]
+            self.result_table_add(data_row)
+            self.verify(Mpps != 0, "The recive data of frame-size: %d is 0" % frame_size)
+        self.result_table_print()
+    
+    def send_scapy(self, verify_type):
+        #
+        # Verify the virtio-pmd can recive the data before/after change queue size
+        #
+        payload_size = 256 - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
+        self.tester.scapy_append('pk1 = [Ether(dst="%s")/IP(dst="1.1.1.1")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+        self.tester.scapy_append('pk2 = [Ether(dst="%s")/IP(dst="1.1.1.20")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+        self.tester.scapy_append('pk3 = [Ether(dst="%s")/IP(dst="1.1.1.7")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+        self.tester.scapy_append('pk4 = [Ether(dst="%s")/IP(dst="1.1.1.8")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+        self.tester.scapy_append('pk = pk1 + pk2 + pk3 + pk4')
+        self.tester.scapy_append('sendp(pk, iface="%s", count=10)' % (self.tx_interface))
+        self.tester.scapy_execute()
+
+        out = self.vhost_user.send_expect("show port stats 0", "testpmd> ", 120)
+        print out
+        rx_packet = re.search("RX-packets:\s*(\d*)", out)
+        rx_num = int(rx_packet.group(1))
+        tx_packet = re.search("TX-packets:\s*(\d*)", out)
+        tx_num = int(tx_packet.group(1))
+        if verify_type == "vhost queue = virtio queue" or verify_type == "vhost queue < virtio queue" :
+            verify_rx_num = 40
+            verify_tx_num = 40
+        elif verify_type == "vhost queue > virtio queue":
+            verify_rx_num = 40
+            verify_tx_num = 10
+
+        self.verify(rx_num >= verify_rx_num and tx_num >= verify_tx_num, "The vm port lost some packets")
+
+    def test_perf_pvp_multiqemu_mergeable_pmd(self):
+        #
+        # Test the performance for mergeable path
+        #
+        self.launch_testpmd()
+        self.start_onevm()
+        self.get_vm_coremask()
+
+        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+        
+        self.vhost_user.send_expect("stop", "testpmd> ", 120)
+        self.vhost_user.send_expect("start", "testpmd> ", 120)
+        time.sleep(5)
+        self.send_performance(self.running_case, self.frame_sizes, "Virtio 0.95 Mergeable Multiqueue Performance")
+        self.vm_dut.kill_all()
+
+    def test_dynamic_change_virtio_queue_size(self):
+        #
+        # Test the performance for change vritio queue size
+        #
+        self.launch_testpmd()
+        self.start_onevm()
+        self.vm_testpmd_queue_1 = self.target + "/app/testpmd -c %s -n 3" + \
+                                  " -- -i --tx-offloads=0x0 " + \
+                                  " --rxq=1 --txq=1 --rss-ip --nb-cores=1"
+        self.get_vm_coremask()
+        self.vm_dut.send_expect(self.vm_testpmd_queue_1 % self.vm_coremask, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.vhost_user.send_expect("clear port stats all", "testpmd> ", 120)
+        self.send_scapy("vhost queue > virtio queue")
+
+        self.vm_dut.send_expect("stop", "testpmd>", 20)
+        self.vm_dut.send_expect("port stop all", "testpmd>")
+        self.vm_dut.send_expect("port config all rxq 2", "testpmd>", 20)
+        self.vm_dut.send_expect("port config all txq 2", "testpmd>")
+        self.vm_dut.send_expect("port start all", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.vhost_user.send_expect("stop", "testpmd> ", 120)
+        self.vhost_user.send_expect("start", "testpmd> ", 120)
+
+        self.vhost_user.send_expect("clear port stats all", "testpmd> ", 120)
+        self.send_scapy("vhost queue = virtio queue")
+
+        self.vm_dut.kill_all()
+        self.vhost_user.send_expect("quit", "# ", 120)
+
+    def test_dynamic_change_vhost_queue_size(self):
+        #
+        # Test the performance for change vhost queue size
+        #
+        self.queue_number = 2
+        testcmd = self.target + "/app/testpmd -c %s -n %d --socket-mem 1024,1024" + \
+                       " --vdev 'net_vhost0,iface=vhost-net,queues=2' -- -i --rxq=1 --txq=1 --nb-cores=1"
+        self.coremask = utils.create_mask(self.cores)
+        testcmd_start = testcmd % (self.coremask, self.memory_channel)
+
+        self.vhost_user = self.dut.new_session(suite="user")
+
+        self.vhost_user.send_expect("cd /root/dpdk", "#", 120)
+        self.vhost_user.send_expect(testcmd_start, "testpmd> ", 120)
+        self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120)
+        self.vhost_user.send_expect("start", "testpmd> ", 120)
+
+        self.start_onevm()
+
+        self.get_vm_coremask()
+        self.vm_dut.send_expect(self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+        self.vhost_user.send_expect("clear port stats all", "testpmd> ", 120)
+
+        self.send_scapy("vhost queue < virtio queue")
+
+        self.vhost_user.send_expect("stop", "testpmd>", 20)
+        self.vhost_user.send_expect("port stop all", "testpmd>")
+        self.vhost_user.send_expect("port config all rxq 2", "testpmd>", 20)
+        self.vhost_user.send_expect("port config all txq 2", "testpmd>")
+        self.vhost_user.send_expect("port start all", "testpmd>", 20)
+        self.vhost_user.send_expect("start", "testpmd>")
+        self.vhost_user.send_expect("clear port stats all", "testpmd>")
+
+        self.send_scapy("vhost queue = virtio queue")
+
+        self.vm_dut.kill_all()
+        self.vhost_user.send_expect("quit", "# ", 120)
+
+    def tear_down(self):
+        #
+        # Run after each test case.
+        # Clear vhost-switch and qemu to avoid blocking the following TCs
+        #
+        if getattr(self, 'vhost_user', None):
+            self.dut.close_session(self.vhost_user)
+        self.vm.stop()
+        time.sleep(2)
+
+    def tear_down_all(self):
+        """
+        Run after each test suite.
+        """
+        pass
-- 
2.7.4

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dts] [PATCH V1] Add test suite about vhost multi queue qemu
  2018-03-07  1:42 lihong
@ 2018-03-08  3:29 ` Liu, Yong
  0 siblings, 0 replies; 8+ messages in thread
From: Liu, Yong @ 2018-03-08  3:29 UTC (permalink / raw)
  To: Ma, LihongX, dts; +Cc: Ma, LihongX

Lihong, my comments are inline.

Thanks,
Marvin

> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of lihong
> Sent: Sunday, April 12, 2020 7:53 PM
> To: dts@dpdk.org
> Cc: Ma, LihongX <lihongx.ma@intel.com>
> Subject: [dts] [PATCH V1] Add test suite about vhost multi queue qemu
> 
> Signed-off-by: lihong <lihongx.ma@intel.com>
> ---
>  tests/TestSuite_vhost_multi_queue_qemu.py | 297 ++++++++++++++++++++++++++++++
>  1 file changed, 297 insertions(+)
>  create mode 100644 tests/TestSuite_vhost_multi_queue_qemu.py
> 
> diff --git a/tests/TestSuite_vhost_multi_queue_qemu.py b/tests/TestSuite_vhost_multi_queue_qemu.py
> new file mode 100644
> index 0000000..4538fd0
> --- /dev/null
> +++ b/tests/TestSuite_vhost_multi_queue_qemu.py
> @@ -0,0 +1,297 @@
> +# BSD LICENSE
> +#
> +# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
> +# All rights reserved.

Please update the date.

> +#
> +# Redistribution and use in source and binary forms, with or without
> +# modification, are permitted provided that the following conditions
> +# are met:
> +#
> +#   * Redistributions of source code must retain the above copyright
> +#     notice, this list of conditions and the following disclaimer.
> +#   * Redistributions in binary form must reproduce the above copyright
> +#     notice, this list of conditions and the following disclaimer in
> +#     the documentation and/or other materials provided with the
> +#     distribution.
> +#   * Neither the name of Intel Corporation nor the names of its
> +#     contributors may be used to endorse or promote products derived
> +#     from this software without specific prior written permission.
> +#
> +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> +
> +"""
> +DPDK Test suite.
> +
> +Vhost PVP performance using Qemu test suite.
> +"""
> +import os
> +import re
> +import time
> +import utils
> +from scapy.utils import wrpcap, rdpcap
> +from test_case import TestCase
> +from exception import VerifyFailure
> +from settings import HEADER_SIZE
> +from etgen import IxiaPacketGenerator

Please remove the unused imports.

> +from qemu_kvm import QEMUKvm
> +
Now we have a wrapper for the virtual machine object. Please do not use the QEMU object directly:
from virt_common import VM
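and then create the VM through the wrapper, e.g.:

    self.vm = VM(self.dut, 'vm0', 'vhost_sample')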


> +
> +class TestVhostUserOneCopyOneVm(TestCase):
> +
> +    def set_up_all(self):
> +        # Get and verify the ports
> +        self.dut_ports = self.dut.get_ports()
> +        self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
> +        local_port = self.tester.get_local_port(self.dut_ports[0])
> +        self.tx_interface = self.tester.get_interface(local_port)
> +        # Get the port's socket
> +        self.pf = self.dut_ports[0]
> +        netdev = self.dut.ports_info[self.pf]['port']
> +        self.socket = netdev.get_nic_socket()
> +        self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.socket)

You may need to check whether the returned core list is sufficient.
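For example (sketch):

    self.verify(len(self.cores) >= 3, "Insufficient cores for speed testing")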

> +
> +        self.queue_number = 2
> +
> +        # Using file to save the vhost sample output since in jumboframe
> case,
> +        # there will be lots of output
> +
> +        self.virtio1 = "eth1"
> +        self.virtio1_mac = "52:54:00:00:00:01"
> +        self.vm_dut = None
> +
> +        self.number_of_ports = 1
> +        self.header_row = ["FrameSize(B)", "Throughput(Mpps)", "LineRate(%)", "Cycle"]
> +        self.memory_channel = 4

You can get the DUT's memory channel count with "self.dut.get_memory_channels()".
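i.e. instead of the hard-coded 4:

    self.memory_channel = self.dut.get_memory_channels()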

> +        if self.dut.cores[len(self.dut.cores)-1]['socket'] == '0':
> +            self.socket_mem = '1024'
> +        else:
> +            self.socket_mem = '1024,1024'

If the socket number is required, I'd prefer to add a new function to the crb object for that.
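For example, a hypothetical helper along these lines (name and placement are illustrative only, not an existing API):

    # in the crb object -- illustrative sketch
    def get_socket_num(self):
        return len(set(core['socket'] for core in self.cores))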

> +
> +    def set_up(self):
> +        #
> +        # Run before each test case.
> +        #
> +        self.dut.send_expect("rm -rf ./vhost.out", "#")
> +        self.dut.send_expect("rm -rf ./vhost-net*", "#")
> +        self.dut.send_expect("killall -s INT vhost-switch", "#")
> +
> +        self.frame_sizes = [64, 128, 256, 512, 1024, 1500]
> +        self.vm_testpmd_vector = self.target + "/app/testpmd -c 0x07 -n 3" + \
> +                                 " -- -i --tx-offloads=0x0 " + \
> +                                 " --rxq=%d --txq=%d --rss-ip --nb-cores=2" % (self.queue_number, self.queue_number)
> +
> +    def launch_testpmd(self, queue=2):
> +        #
> +        # Launch the vhost sample with different parameters
> +        #
> +        self.testcmd = "./x86_64-native-linuxapp-gcc/app/testpmd -c %s -n %d --socket-mem %s" + \
> +                       " --vdev 'net_vhost0,iface=vhost-net,queues=%d' -- -i --rxq=%d --txq=%d --nb-cores=2"

Please use self.target for DPDK target.

> +        self.coremask = utils.create_mask(self.cores)
> +        self.testcmd_start = self.testcmd % (self.coremask, self.memory_channel, self.socket_mem, queue, queue, queue)
> +
A local variable should be enough; testcmd_start isn't used anywhere else.

> +        self.vhost_user = self.dut.new_session(suite="user")


Please remember to close this session in the tear_down function.
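For example, something like this in tear_down (sketch):

    if getattr(self, 'vhost_user', None):
        self.dut.close_session(self.vhost_user)
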
> +
> +        self.vhost_user.send_expect("cd /root/dpdk", "#", 120)
> +        self.vhost_user.send_expect(self.testcmd_start, "testpmd> ", 120)
> +        self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120)
> +        self.vhost_user.send_expect("start", "testpmd> ", 120)
> +
> +    def start_onevm(self, path="", modem=0):
> +        #
> +        # Start One VM with one virtio device
> +        #
> +        self.vm = QEMUKvm(self.dut, 'vm0', 'vhost_sample')
> +        if(path != ""):
> +            self.vm.set_qemu_emulator(path)
> +        vm_params = {}
> +        vm_params['driver'] = 'vhost-user'
> +        vm_params['opt_path'] = './vhost-net'
> +        vm_params['opt_mac'] = self.virtio1_mac
> +        vm_params['opt_queue'] = self.queue_number
> +        vm_params['opt_settings'] = 'mrg_rxbuf=on,mq=on,vectors=6'
> +        if(modem == 1):
> +            vm_params['opt_settings'] = 'disable-modern=false'
> +        self.vm.set_vm_device(**vm_params)
> +
> +        try:
> +            self.vm_dut = self.vm.start()
> +            if self.vm_dut is None:
> +                raise Exception("Set up VM ENV failed")
> +        except Exception as e:
> +            self.logger.error("ERROR: Failure for %s" % str(e))
> +
> +        return True
> +
> +    def vm_testpmd_start(self):
> +        #
> +        # Start testpmd in vm
> +        #
> +        if self.vm_dut is not None:
> +            self.vm_dut.send_expect(self.vm_testpmd_vector, "testpmd>", 20)
> +            self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +            self.vm_dut.send_expect("start tx_first", "testpmd>")
> +
> +    def send_verify(self, case, frame_sizes, tag="Performance"):

It looks like nothing is verified in this function, so you may need to change the function name.

> +        self.result_table_create(self.header_row)
> +        for frame_size in frame_sizes:
> +            info = "Running test %s, and %d frame size." % (case, frame_size)
> +            self.logger.info(info)
> +            payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
> +            tgenInput = []
> +
> +            self.tester.scapy_append('a= [Ether(dst="%s")/IP(dst="1.1.1.1")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +            self.tester.scapy_append('b= [Ether(dst="%s")/IP(dst="1.1.1.20")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +            self.tester.scapy_append('c= [Ether(dst="%s")/IP(dst="1.1.1.7")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +            self.tester.scapy_append('d= [Ether(dst="%s")/IP(dst="1.1.1.8")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +            self.tester.scapy_append('a= a + b + c + d')
> +            self.tester.scapy_append('wrpcap("multiqueue_2.pcap", a)')
> +            self.tester.scapy_execute()
> +
> +            port = self.tester.get_local_port(self.pf)
> +            tgenInput.append((port, port, "multiqueue_2.pcap"))
> +
> +            _, pps = self.tester.traffic_generator_throughput(tgenInput, delay=30)
> +            Mpps = pps / 1000000.0
> +            pct = Mpps * 100 / float(self.wirespeed(self.nic, frame_size,
> +                                     self.number_of_ports))
> +            data_row = [frame_size, str(Mpps), str(pct), tag]
> +            self.result_table_add(data_row)
> +            #self.verify(Mpps != 0, "The recive data of pak-size: %d is 0")

Please fix typo here.

> +        self.result_table_print()
> +
> +    def send_scapy(self, verify_type = 1):

Please add a comment to this function.
Please use a string for the verify_type argument, which is more readable.
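For example, a sketch using descriptive strings:

    def send_scapy(self, verify_type):
        ...
        if verify_type == "vhost queue > virtio queue":
            verify_rx_num = 40
            verify_tx_num = 10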

> +        payload_size = 256 - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
> +        self.tester.scapy_append('pk1 = [Ether(dst="%s")/IP(dst="1.1.1.1")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +        self.tester.scapy_append('pk2 = [Ether(dst="%s")/IP(dst="1.1.1.20")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +        self.tester.scapy_append('pk3 = [Ether(dst="%s")/IP(dst="1.1.1.7")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +        self.tester.scapy_append('pk4 = [Ether(dst="%s")/IP(dst="1.1.1.8")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
> +        self.tester.scapy_append('pk = pk1 + pk2 + pk3 + pk4')
> +        self.tester.scapy_append('sendp(pk, iface="%s", count=10)' % (self.tx_interface))
> +        self.tester.scapy_execute()
> +
> +        out = self.vhost_user.send_expect("show port stats 0", "testpmd> ", 120)
> +        print out
> +        rx_packet = re.search("RX-packets:\s*(\d*)", out)
> +        rx_num = int(rx_packet.group(1))
> +        tx_packet = re.search("TX-packets:\s*(\d*)", out)
> +        tx_num = int(tx_packet.group(1))
> +        if verify_type == 1:
> +            verify_rx_num = 40
> +            verify_tx_num = 40
> +        else:	
> +            verify_rx_num = 40
> +            verify_tx_num = 10
> +
> +        self.verify(rx_num >= verify_rx_num and tx_num >= verify_tx_num, "The vm port lost some packets")
> +
> +    def test_perf_pvp_multiqemu_mergeable_pmd(self):
> +        #
> +        # Test the performance for mergeable path
> +        #
> +        self.launch_testpmd()
> +        self.start_onevm()
> +        self.vm_dut.send_expect(self.vm_testpmd_vector, "testpmd>", 20)
> +        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +
> +        self.vhost_user.send_expect("stop", "testpmd> ", 120)
> +        self.vhost_user.send_expect("start", "testpmd> ", 120)
> +        time.sleep(5)
> +        self.send_verify(self.running_case, self.frame_sizes, "Virtio 0.95 Mergeable Multiqueue Performance")
> +        self.vm_dut.kill_all()
> +
> +    def test_dynamic_change_virtio_queue_size(self):
> +        #
> +        # Test the performance for change vritio queue size
> +        #
> +        self.launch_testpmd()
> +        self.start_onevm()
> +        self.vm_testpmd_queue_1 = self.target + "/app/testpmd -c 0x07 -n 3" + \
> +                                  " -- -i --tx-offloads=0x0 " + \
> +                                  " --rxq=1 --txq=1 --rss-ip --nb-cores=1"
> +
> +        self.vm_dut.send_expect(self.vm_testpmd_queue_1, "testpmd>", 20)
> +        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +
> +        self.vhost_user.send_expect("clear port stats all", "testpmd> ",
> 120)
> +        self.send_scapy(verify_type = 2)
> +
> +        self.vm_dut.send_expect("stop", "testpmd>", 20)
> +        self.vm_dut.send_expect("port stop all", "testpmd>")
> +        self.vm_dut.send_expect("port config all rxq 2", "testpmd>", 20)
> +        self.vm_dut.send_expect("port config all txq 2", "testpmd>")
> +        self.vm_dut.send_expect("port start all", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +
> +        self.vhost_user.send_expect("stop", "testpmd> ", 120)
> +        self.vhost_user.send_expect("start", "testpmd> ", 120)
> +
> +        self.vhost_user.send_expect("clear port stats all", "testpmd> ",
> 120)
> +        self.send_scapy(verify_type = 1)
> +
> +        self.vm_dut.kill_all()
> +        self.vhost_user.send_expect("quit", "# ", 120)
> +
> +    def test_dynamic_change_vhost_queue_size(self):
> +        #
> +        # Test the performance for change vhost queue size
> +        #
> +        self.queue_number = 2
> +        self.testcmd = "./x86_64-native-linuxapp-gcc/app/testpmd -c %s -n %d --socket-mem %s" + \
> +                       " --vdev 'net_vhost0,iface=vhost-net,queues=2' -- -i --rxq=1 --txq=1 --nb-cores=1"
> +        self.coremask = utils.create_mask(self.cores)
> +        self.testcmd_start = self.testcmd % (self.coremask, self.memory_channel, self.socket_mem)
> +
> +        self.vhost_user = self.dut.new_session(suite="user")
> +
> +        self.vhost_user.send_expect("cd /root/dpdk", "#", 120)
> +        self.vhost_user.send_expect(self.testcmd_start, "testpmd> ", 120)
> +        self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120)
> +        self.vhost_user.send_expect("start", "testpmd> ", 120)
> +
> +        self.start_onevm()
> +
> +        self.vm_dut.send_expect(self.vm_testpmd_vector, "testpmd>", 20)
> +        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
> +        self.vm_dut.send_expect("start", "testpmd>")
> +        self.vhost_user.send_expect("clear port stats all", "testpmd> ",
> 120)
> +
> +        self.send_scapy(verify_type = 1)
> +
> +        self.vhost_user.send_expect("stop", "testpmd>", 20)
> +        self.vhost_user.send_expect("port stop all", "testpmd>")
> +        self.vhost_user.send_expect("port config all rxq 2", "testpmd>",
> 20)
> +        self.vhost_user.send_expect("port config all txq 2", "testpmd>")
> +        self.vhost_user.send_expect("port start all", "testpmd>", 20)
> +        self.vhost_user.send_expect("start", "testpmd>")
> +        self.vhost_user.send_expect("clear port stats all", "testpmd>")
> +
> +        self.send_scapy(verify_type = 1)
> +
> +        self.vm_dut.kill_all()
> +        self.vhost_user.send_expect("quit", "# ", 120)
> +
> +    def tear_down(self):
> +        #
> +        # Run after each test case.
> +        # Clear vhost-switch and qemu to avoid blocking the following TCs
> +        #
> +        self.vm.stop()
> +        time.sleep(2)
> +
> +    def tear_down_all(self):
> +        """
> +        Run after each test suite.
> +        """
> +        pass
> --
> 2.7.4

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dts] [PATCH V1] Add test suite about vhost multi queue qemu
@ 2018-03-07  1:42 lihong
  2018-03-08  3:29 ` Liu, Yong
  0 siblings, 1 reply; 8+ messages in thread
From: lihong @ 2018-03-07  1:42 UTC (permalink / raw)
  To: dts; +Cc: lihong

Signed-off-by: lihong <lihongx.ma@intel.com>
---
 tests/TestSuite_vhost_multi_queue_qemu.py | 297 ++++++++++++++++++++++++++++++
 1 file changed, 297 insertions(+)
 create mode 100644 tests/TestSuite_vhost_multi_queue_qemu.py

diff --git a/tests/TestSuite_vhost_multi_queue_qemu.py b/tests/TestSuite_vhost_multi_queue_qemu.py
new file mode 100644
index 0000000..4538fd0
--- /dev/null
+++ b/tests/TestSuite_vhost_multi_queue_qemu.py
@@ -0,0 +1,297 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Intel Corporation nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+DPDK Test suite.
+
+Vhost PVP performance using Qemu test suite.
+"""
+import os
+import re
+import time
+import utils
+from scapy.utils import wrpcap, rdpcap
+from test_case import TestCase
+from exception import VerifyFailure
+from settings import HEADER_SIZE
+from etgen import IxiaPacketGenerator
+from qemu_kvm import QEMUKvm
+
+
+class TestVhostUserOneCopyOneVm(TestCase):
+
+    def set_up_all(self):
+        # Get and verify the ports
+        self.dut_ports = self.dut.get_ports()
+        self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
+        local_port = self.tester.get_local_port(self.dut_ports[0])
+        self.tx_interface = self.tester.get_interface(local_port)
+        # Get the port's socket
+        self.pf = self.dut_ports[0]
+        netdev = self.dut.ports_info[self.pf]['port']
+        self.socket = netdev.get_nic_socket()
+        self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.socket)
+
+        self.queue_number = 2
+
+        # Using file to save the vhost sample output since in jumboframe case,
+        # there will be lots of output
+
+        self.virtio1 = "eth1"
+        self.virtio1_mac = "52:54:00:00:00:01"
+        self.vm_dut = None
+
+        self.number_of_ports = 1
+        self.header_row = ["FrameSize(B)", "Throughput(Mpps)", "LineRate(%)", "Cycle"]
+        self.memory_channel = 4
+        if self.dut.cores[len(self.dut.cores)-1]['socket'] == '0':
+            self.socket_mem = '1024'
+        else:
+            self.socket_mem = '1024,1024'
+
+    def set_up(self):
+        #
+        # Run before each test case.
+        #
+        self.dut.send_expect("rm -rf ./vhost.out", "#")
+        self.dut.send_expect("rm -rf ./vhost-net*", "#")
+        self.dut.send_expect("killall -s INT vhost-switch", "#")
+
+        self.frame_sizes = [64, 128, 256, 512, 1024, 1500]
+        self.vm_testpmd_vector = self.target + "/app/testpmd -c 0x07 -n 3" + \
+                                 " -- -i --tx-offloads=0x0 " + \
+                                 " --rxq=%d --txq=%d --rss-ip --nb-cores=2" % (self.queue_number, self.queue_number)
+
+    def launch_testpmd(self, queue=2):
+        #
+        # Launch the vhost sample with different parameters
+        #
+        self.testcmd = "./x86_64-native-linuxapp-gcc/app/testpmd -c %s -n %d --socket-mem %s" + \
+                       " --vdev 'net_vhost0,iface=vhost-net,queues=%d' -- -i --rxq=%d --txq=%d --nb-cores=2"
+        self.coremask = utils.create_mask(self.cores)
+        self.testcmd_start = self.testcmd % (self.coremask, self.memory_channel, self.socket_mem, queue, queue, queue)
+
+        self.vhost_user = self.dut.new_session(suite="user")
+
+        self.vhost_user.send_expect("cd /root/dpdk", "#", 120)
+        self.vhost_user.send_expect(self.testcmd_start, "testpmd> ", 120)
+        self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120)
+        self.vhost_user.send_expect("start", "testpmd> ", 120)
+
+    def start_onevm(self, path="", modem=0):
+        #
+        # Start One VM with one virtio device
+        #
+        self.vm = QEMUKvm(self.dut, 'vm0', 'vhost_sample')
+        if(path != ""):
+            self.vm.set_qemu_emulator(path)
+        vm_params = {}
+        vm_params['driver'] = 'vhost-user'
+        vm_params['opt_path'] = './vhost-net'
+        vm_params['opt_mac'] = self.virtio1_mac
+        vm_params['opt_queue'] = self.queue_number
+        vm_params['opt_settings'] = 'mrg_rxbuf=on,mq=on,vectors=6'
+        if(modem == 1):
+            vm_params['opt_settings'] = 'disable-modern=false'
+        self.vm.set_vm_device(**vm_params)
+
+        try:
+            self.vm_dut = self.vm.start()
+            if self.vm_dut is None:
+                raise Exception("Set up VM ENV failed")
+        except Exception as e:
+            self.logger.error("ERROR: Failure for %s" % str(e))
+
+        return True
+
+    def vm_testpmd_start(self):
+        #
+        # Start testpmd in vm
+        #
+        if self.vm_dut is not None:
+            self.vm_dut.send_expect(self.vm_testpmd_vector, "testpmd>", 20)
+            self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+            self.vm_dut.send_expect("start tx_first", "testpmd>")
+
+    def send_verify(self, case, frame_sizes, tag="Performance"):
+        self.result_table_create(self.header_row)
+        for frame_size in frame_sizes:
+            info = "Running test %s, and %d frame size." % (case, frame_size)
+            self.logger.info(info)
+            payload_size = frame_size - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
+            tgenInput = []
+
+            self.tester.scapy_append('a= [Ether(dst="%s")/IP(dst="1.1.1.1")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+            self.tester.scapy_append('b= [Ether(dst="%s")/IP(dst="1.1.1.20")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+            self.tester.scapy_append('c= [Ether(dst="%s")/IP(dst="1.1.1.7")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+            self.tester.scapy_append('d= [Ether(dst="%s")/IP(dst="1.1.1.8")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+            self.tester.scapy_append('a= a + b + c + d')
+            self.tester.scapy_append('wrpcap("multiqueue_2.pcap", a)')
+            self.tester.scapy_execute()
+
+            port = self.tester.get_local_port(self.pf)
+            tgenInput.append((port, port, "multiqueue_2.pcap"))
+
+            _, pps = self.tester.traffic_generator_throughput(tgenInput, delay=30)
+            Mpps = pps / 1000000.0
+            pct = Mpps * 100 / float(self.wirespeed(self.nic, frame_size,
+                                     self.number_of_ports))
+            data_row = [frame_size, str(Mpps), str(pct), tag]
+            self.result_table_add(data_row)
+            #self.verify(Mpps != 0, "The recive data of pak-size: %d is 0")
+        self.result_table_print()
+    
+    def send_scapy(self, verify_type = 1):
+        payload_size = 256 - HEADER_SIZE['eth'] - HEADER_SIZE['ip'] - HEADER_SIZE['udp']
+        self.tester.scapy_append('pk1 = [Ether(dst="%s")/IP(dst="1.1.1.1")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+        self.tester.scapy_append('pk2 = [Ether(dst="%s")/IP(dst="1.1.1.20")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+        self.tester.scapy_append('pk3 = [Ether(dst="%s")/IP(dst="1.1.1.7")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+        self.tester.scapy_append('pk4 = [Ether(dst="%s")/IP(dst="1.1.1.8")/UDP()/("X"*%d)]' % (self.virtio1_mac, payload_size))
+        self.tester.scapy_append('pk = pk1 + pk2 + pk3 + pk4')
+        self.tester.scapy_append('sendp(pk, iface="%s", count=10)' % (self.tx_interface))
+        self.tester.scapy_execute()
+
+        out = self.vhost_user.send_expect("show port stats 0", "testpmd> ", 120)
+        print out
+        rx_packet = re.search("RX-packets:\s*(\d*)", out)
+        rx_num = int(rx_packet.group(1))
+        tx_packet = re.search("TX-packets:\s*(\d*)", out)
+        tx_num = int(tx_packet.group(1))
+        if verify_type == 1:
+            verify_rx_num = 40
+            verify_tx_num = 40
+        else:
+            verify_rx_num = 40
+            verify_tx_num = 10
+
+        self.verify(rx_num >= verify_rx_num and tx_num >= verify_tx_num, "The vm port lost some packets")
+
+    def test_perf_pvp_multiqemu_mergeable_pmd(self):
+        #
+        # Test the performance for mergeable path
+        #
+        self.launch_testpmd()
+        self.start_onevm()
+        self.vm_dut.send_expect(self.vm_testpmd_vector, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.vhost_user.send_expect("stop", "testpmd> ", 120)
+        self.vhost_user.send_expect("start", "testpmd> ", 120)
+        time.sleep(5)
+        self.send_verify(self.running_case, self.frame_sizes, "Virtio 0.95 Mergeable Multiqueue Performance")
+        self.vm_dut.kill_all()
+
+    def test_dynamic_change_virtio_queue_size(self):
+        #
+        # Test the performance for change vritio queue size
+        #
+        self.launch_testpmd()
+        self.start_onevm()
+        self.vm_testpmd_queue_1 = self.target + "/app/testpmd -c 0x07 -n 3" + \
+                                  " -- -i --tx-offloads=0x0 " + \
+                                  " --rxq=1 --txq=1 --rss-ip --nb-cores=1"
+
+        self.vm_dut.send_expect(self.vm_testpmd_queue_1, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.vhost_user.send_expect("clear port stats all", "testpmd> ", 120)
+        self.send_scapy(verify_type = 2)
+
+        self.vm_dut.send_expect("stop", "testpmd>", 20)
+        self.vm_dut.send_expect("port stop all", "testpmd>")
+        self.vm_dut.send_expect("port config all rxq 2", "testpmd>", 20)
+        self.vm_dut.send_expect("port config all txq 2", "testpmd>")
+        self.vm_dut.send_expect("port start all", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+
+        self.vhost_user.send_expect("stop", "testpmd> ", 120)
+        self.vhost_user.send_expect("start", "testpmd> ", 120)
+
+        self.vhost_user.send_expect("clear port stats all", "testpmd> ", 120)
+        self.send_scapy(verify_type = 1)
+
+        self.vm_dut.kill_all()
+        self.vhost_user.send_expect("quit", "# ", 120)
+
+    def test_dynamic_change_vhost_queue_size(self):
+        #
+        # Test the performance for change vhost queue size
+        #
+        self.queue_number = 2
+        self.testcmd = "./x86_64-native-linuxapp-gcc/app/testpmd -c %s -n %d --socket-mem %s" + \
+                       " --vdev 'net_vhost0,iface=vhost-net,queues=2' -- -i --rxq=1 --txq=1 --nb-cores=1"
+        self.coremask = utils.create_mask(self.cores)
+        self.testcmd_start = self.testcmd % (self.coremask, self.memory_channel, self.socket_mem)
+
+        self.vhost_user = self.dut.new_session(suite="user")
+
+        self.vhost_user.send_expect("cd /root/dpdk", "#", 120)
+        self.vhost_user.send_expect(self.testcmd_start, "testpmd> ", 120)
+        self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120)
+        self.vhost_user.send_expect("start", "testpmd> ", 120)
+
+        self.start_onevm()
+
+        self.vm_dut.send_expect(self.vm_testpmd_vector, "testpmd>", 20)
+        self.vm_dut.send_expect("set fwd mac", "testpmd>", 20)
+        self.vm_dut.send_expect("start", "testpmd>")
+        self.vhost_user.send_expect("clear port stats all", "testpmd> ", 120)
+
+        self.send_scapy(verify_type = 1)
+
+        self.vhost_user.send_expect("stop", "testpmd>", 20)
+        self.vhost_user.send_expect("port stop all", "testpmd>")
+        self.vhost_user.send_expect("port config all rxq 2", "testpmd>", 20)
+        self.vhost_user.send_expect("port config all txq 2", "testpmd>")
+        self.vhost_user.send_expect("port start all", "testpmd>", 20)
+        self.vhost_user.send_expect("start", "testpmd>")
+        self.vhost_user.send_expect("clear port stats all", "testpmd>")
+
+        self.send_scapy(verify_type = 1)
+
+        self.vm_dut.kill_all()
+        self.vhost_user.send_expect("quit", "# ", 120)
+
+    def tear_down(self):
+        #
+        # Run after each test case.
+        # Clear vhost-switch and qemu to avoid blocking the following TCs
+        #
+        self.vm.stop()
+        time.sleep(2)
+
+    def tear_down_all(self):
+        """
+        Run after each test suite.
+        """
+        pass
-- 
2.7.4

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2018-03-23  2:21 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-03-15  8:38 [dts] [PATCH V1] Add test suite about vhost multi queue qemu lihong
2018-03-19  8:34 ` Liu, Yong
  -- strict thread matches above, loose matches on Subject: below --
2018-03-20  8:41 lihong
2018-03-23 10:10 ` Liu, Yong
2018-03-09  7:28 lihong
2018-03-09  8:51 ` Liu, Yong
2018-03-07  1:42 lihong
2018-03-08  3:29 ` Liu, Yong
