From: Yinan
To: dts@dpdk.org
Cc: Wang Yinan
Date: Thu, 12 Mar 2020 06:05:45 +0000
Message-Id: <20200312060545.15118-1-yinan.wang@intel.com>
X-Mailer: git-send-email 2.17.1
Subject: [dts] [PATCH v1] tests: update vhost_pmd_xstats test suite

From: Wang Yinan

Rework the vhost PMD xstats suite: drop the vhost sample application and
QEMU guest, and run PVP with back-to-back vhost-user and virtio-user
testpmd instances instead. The packet-size and packet-type checks are
merged into a single verification helper, which is now exercised over the
different virtio paths (split/packed ring, mergeable/non-mergeable,
in-order, vector RX).

Signed-off-by: Wang Yinan
---
 tests/TestSuite_vhost_pmd_xstats.py | 293 ++++++++++++++++++----------
 1 file changed, 185 insertions(+), 108 deletions(-)

diff --git a/tests/TestSuite_vhost_pmd_xstats.py b/tests/TestSuite_vhost_pmd_xstats.py
index 0ebbe1c..610a11a 100755
--- a/tests/TestSuite_vhost_pmd_xstats.py
+++ b/tests/TestSuite_vhost_pmd_xstats.py
@@ -56,69 +56,37 @@ class TestVhostPmdXstats(TestCase):
         self.unbind_ports = copy.deepcopy(self.dut_ports)
         self.unbind_ports.remove(0)
         self.dut.unbind_interfaces_linux(self.unbind_ports)
-        cores = self.dut.get_core_list("1S/4C/1T")
-        self.coremask = utils.create_mask(cores)
         txport = self.tester.get_local_port(self.dut_ports[0])
         self.txItf = self.tester.get_interface(txport)
-        self.scapy_num = 0
         self.dmac = self.dut.get_mac_address(self.dut_ports[0])
         self.virtio1_mac = "52:54:00:00:00:01"
-        self.pci_info = self.dut.ports_info[0]['pci']
-
-        # build sample app
-        out = self.dut.build_dpdk_apps("./examples/vhost")
-        self.verify("Error" not in out, "compilation error 1")
-        self.verify("No such file" not in out, "compilation error 2")
-        self.base_dir = self.dut.base_dir.replace('~', '/root')
+        self.core_config = "1S/6C/1T"
+        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
+        self.cores_num = len([n for n in self.dut.cores if int(n['socket'])
+                              == self.ports_socket])
+        self.verify(self.cores_num >= 6,
+                    "There are not enough cores to test this case")
+        self.core_list = self.dut.get_core_list(
+            self.core_config, socket=self.ports_socket)
+        self.core_list_user = self.core_list[0:3]   # cores for the virtio-user testpmd
+        self.core_list_host = self.core_list[3:6]   # cores for the vhost-user testpmd
+        self.dst_mac = self.dut.get_mac_address(self.dut_ports[0])
 
     def set_up(self):
         """
         Run before each test case.
-        Launch vhost sample using default params
         """
-        self.dut.send_expect("rm -rf ./vhost.out", "#")
-        self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
-        self.dut.send_expect("killall vhost-switch", "#")
-        dut_arch = self.dut.send_expect("uname -m", "#")
-        self.dut.send_expect("killall qemu-system-%s" % dut_arch, "#")
+        self.dut.send_expect("rm -rf ./vhost-net*", "#")
+        self.dut.send_expect("killall -s INT testpmd", "#")
+        self.scapy_num = 0   # number of scapy send calls in this case
+        self.vhost_user = self.dut.new_session(suite="vhost-user")
+        self.virtio_user = self.dut.new_session(suite="virtio-user")
 
-    def vm_testpmd_start(self):
-        """
-        Start testpmd in vm
-        """
-        self.vm_testpmd = "./%s/app/testpmd -c 0x3 -n 4 -- -i --tx-offloads=0" % self.target
-        if self.vm_dut is not None:
-            self.vm_dut.send_expect(self.vm_testpmd, "testpmd>", 60)
-
-    def vm_tx_first_start(self):
-        """
-        Start tx_first
-        """
-        if self.vm_dut is not None:
-            # Start tx_first
-            self.vm_dut.send_expect("set fwd mac", "testpmd>")
-            self.vm_dut.send_expect("start tx_first", "testpmd>")
-
-    def start_onevm(self):
-        """
-        Start One VM with one virtio device
-        """
-        self.vm_dut = None
-        self.vm = QEMUKvm(self.dut, 'vm0', 'vhost_pmd_xstats')
-        vm_params = {}
-        vm_params['driver'] = 'vhost-user'
-        vm_params['opt_path'] = self.base_dir + '/vhost-net'
-        vm_params['opt_mac'] = self.virtio1_mac
-        self.vm.set_vm_device(**vm_params)
-
-        try:
-            self.vm_dut = self.vm.start()
-            if self.vm_dut is None:
-                raise Exception("Set up VM ENV failed")
-        except Exception as e:
-            self.logger.error("Failure for %s" % str(e))
-        return True
+    @property
+    def check_2M_env(self):
+        out = self.dut.send_expect("cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# ")
+        return out == '2048'   # Hugepagesize of 2048 kB means a 2M-hugepage setup
 
     def scapy_send_packet(self, pktsize, dmac, num=1):
         """
@@ -126,85 +94,196 @@ class TestVhostPmdXstats(TestCase):
         """
         self.scapy_num += 1
         pkt = Packet(pkt_type='TCP', pkt_len=pktsize)
-        pkt.config_layer('ether', {'dst': dmac, })
+        pkt.config_layer('ether', {'dst': dmac})
         pkt.send_pkt(self.tester, tx_port=self.txItf, count=num)
 
     def send_verify(self, scope, mun):
         """
         according the scope to check results
         """
-        out = self.dut.send_expect(
-            "show port xstats %s" % self.dut_ports[0], "testpmd>", 60)
-        packet = re.search("rx_%s_packets:\s*(\d*)" % scope, out)
-        sum_packet = packet.group(1)
-        self.verify(int(sum_packet) >= mun,
-                    "Insufficient the received package")
+        out = self.vhost_user.send_expect(
+            "show port xstats 1", "testpmd>", 60)
+        packet_rx = re.search("rx_%s_packets:\s*(\d*)" % scope, out)
+        sum_packet_rx = packet_rx.group(1)
+        packet_tx = re.search("tx_%s_packets:\s*(\d*)" % scope, out)
+        sum_packet_tx = packet_tx.group(1)
+        self.verify(int(sum_packet_rx) >= mun,
+                    "Insufficient rx packets counted in the vhost port xstats")
+        self.verify(int(sum_packet_tx) >= mun,
+                    "Insufficient tx packets counted in the vhost port xstats")
 
-    def prepare_start(self):
+    def start_vhost_testpmd(self):
         """
-        prepare all of the conditions for start
+        Start testpmd on the vhost-user side
         """
-        testcmd = self.target + "/app/testpmd "
-        vdev = [r"'net_vhost0,iface=%s/vhost-net,queues=1'" % self.base_dir]
-        eal_params = self.dut.create_eal_parameters(cores="1S/4C/1T", ports=[self.pci_info], vdevs=vdev)
-        para = " -- -i --nb-cores=1"
-        cmd = testcmd + eal_params + para
-        self.dut.send_expect(cmd, "testpmd>", 60)
-        self.start_onevm()
-        self.vm_testpmd_start()
-        self.dut.send_expect("set fwd mac", "testpmd>", 60)
-        self.dut.send_expect("start tx_first", "testpmd>", 60)
-        self.vm_tx_first_start()
+        eal_param = self.dut.create_eal_parameters(socket=self.ports_socket, cores=self.core_list_host, prefix='vhost',
+                                                   vdevs=['net_vhost0,iface=vhost-net,queues=2,client=0'])
+        command_line_client = "./%s/app/testpmd " % self.target + eal_param + ' -- -i --nb-cores=2 --rxq=2 --txq=2 --rss-ip'
+        self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
+        self.vhost_user.send_expect("set fwd io", "testpmd> ", 120)
+        self.vhost_user.send_expect("start", "testpmd> ", 120)
 
-    def test_based_size(self):
+    def start_virtio_testpmd(self, args):
+        """
+        Start testpmd on the virtio-user side
+        """
+        eal_param = self.dut.create_eal_parameters(socket=self.ports_socket, cores=self.core_list_user, prefix='virtio',
+                                                   no_pci=True, vdevs=[
+                'net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,queues=2,%s' % args["version"]])
+        if self.check_2M_env:
+            eal_param += " --single-file-segments"
+        command_line_user = "./%s/app/testpmd " % self.target + eal_param + " -- -i %s --rss-ip --nb-cores=2 --rxq=2 --txq=2" % \
+                            args["path"]
+        self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
+        self.virtio_user.send_expect("set fwd io", "testpmd> ", 120)
+        self.virtio_user.send_expect("start", "testpmd> ", 120)
+
+    def xstats_number_and_type_verify(self):
         """
         Verify receiving and transmitting packets correctly in the Vhost PMD xstats
         """
-        self.prepare_start()
-        out = self.dut.send_expect(
-            "show port xstats %s" % self.dut_ports[0], "testpmd>", 60)
+        out = self.vhost_user.send_expect(
+            "show port xstats 1", "testpmd>", 60)
         p = re.compile(r'rx_size_[0-9]+_[to_\w+]*packets')
         categories = p.findall(out)
+        categories = categories[:-1]
         self.verify(len(categories) > 0, 'Unable to find the categories of RX packet size!')
         for cat in categories:
             scope = re.search(r'(?<=rx_)\w+(?=_packets)', cat).group(0)
             pktsize = int(re.search(r'(?<=rx_size_)\d+', cat).group(0))
             if pktsize > 1518:
                 self.tester.send_expect('ifconfig %s mtu %d' % (self.txItf, ETHER_JUMBO_FRAME_MTU), '# ')
+            types = ['ff:ff:ff:ff:ff:ff', '01:00:00:33:00:01']
+            for p in types:
+                if p == 'ff:ff:ff:ff:ff:ff':
+                    scope = 'broadcast'
+                    self.dmac = 'ff:ff:ff:ff:ff:ff'
+                elif p == '01:00:00:33:00:01':
+                    scope = 'multicast'
+                    self.dmac = '01:00:00:33:00:01'
+                self.scapy_send_packet(int(pktsize + 4), self.dmac, 10000)
+                self.send_verify(scope, 10000)
+                self.vhost_user.send_expect("clear port xstats all", "testpmd>", 60)
+            self.tester.send_expect('ifconfig %s mtu %d' % (self.txItf, DEFAULT_JUMBO_FRAME_MTU), '# ')
-            self.scapy_send_packet(pktsize, self.dmac, 10000)
-            self.send_verify(scope, 10000)
-            self.clear_port_xstats(scope)
-            self.tester.send_expect('ifconfig %s mtu %d' % (self.txItf, DEFAULT_JUMBO_FRAME_MTU), '# ')
+
+    def test_vhost_xstats_virtio11_mergeable(self):
+        """
+        Verify Vhost xstats with the PVP virtio 1.1 mergeable path.
+        """
+        virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1",
+                          "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"}
+        self.start_vhost_testpmd()
+        self.start_virtio_testpmd(virtio_pmd_arg)
+        self.xstats_number_and_type_verify()
+        self.close_all_testpmd()
+
+    def test_vhost_xstats_virtio11_no_mergeable(self):
+        """
+        Verify Vhost xstats with the PVP virtio 1.1 non-mergeable path.
+ """ + virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0", + "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} + self.start_vhost_testpmd() + self.start_virtio_testpmd(virtio_pmd_arg) + self.xstats_number_and_type_verify() + self.close_all_testpmd() + + def test_vhost_xstats_virtio11_inorder_mergeable(self): + """ + performance for Vhost PVP virtio 1.1 inorder Mergeable Path. + """ + virtio_pmd_arg = {"version": "in_order=1,packed_vq=1,mrg_rxbuf=1", + "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} + self.start_vhost_testpmd() + self.start_virtio_testpmd(virtio_pmd_arg) + self.xstats_number_and_type_verify() + self.close_all_testpmd() + + def test_vhost_xstats_virtio11_inorder_no_mergeable(self): + """ + performance for Vhost PVP virtio1.1 inorder no_mergeable Path. + """ + virtio_pmd_arg = {"version": "in_order=1,packed_vq=1,mrg_rxbuf=0", + "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} + self.start_vhost_testpmd() + self.start_virtio_testpmd(virtio_pmd_arg) + self.xstats_number_and_type_verify() + self.close_all_testpmd() + + def test_vhost_xstats_inorder_mergeable(self): + """ + performance for Vhost PVP In_order mergeable Path. + """ + virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1", + "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} + self.start_vhost_testpmd() + self.start_virtio_testpmd(virtio_pmd_arg) + self.xstats_number_and_type_verify() + self.close_all_testpmd() + + def test_vhost_xstats_inorder_no_mergeable(self): + """ + performance for Vhost PVP In_order no_mergeable Path. + """ + virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0", + "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} + self.start_vhost_testpmd() + self.start_virtio_testpmd(virtio_pmd_arg) + self.xstats_number_and_type_verify() + self.close_all_testpmd() + + def test_vhost_xstats_mergeable(self): + """ + performance for Vhost PVP Mergeable Path. + """ + virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1", + "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} + self.start_vhost_testpmd() + self.start_virtio_testpmd(virtio_pmd_arg) + self.xstats_number_and_type_verify() + self.close_all_testpmd() + + def test_vhost_xstats_no_mergeable(self): + """ + performance for Vhost PVP no_mergeable Path. 
+ """ + virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0", + "path": "--tx-offloads=0x0 --enable-hw-vlan-strip --rss-ip"} + self.start_vhost_testpmd() + self.start_virtio_testpmd(virtio_pmd_arg) + self.xstats_number_and_type_verify() + self.close_all_testpmd() + + def test_vhost_xstats_vector_rx(self): + """ + performance for Vhost PVP Vector_RX Path + """ + virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0", + "path": "--tx-offloads=0x0"} + self.start_vhost_testpmd() + self.start_virtio_testpmd(virtio_pmd_arg) + self.xstats_number_and_type_verify() + self.close_all_testpmd() + + def close_all_testpmd(self): + """ + close all testpmd of vhost and virtio + """ + self.vhost_user.send_expect("quit", "#", 60) + self.virtio_user.send_expect("quit", "#", 60) def clear_port_xstats(self, scope): - self.dut.send_expect("clear port xstats all", "testpmd>", 60) - out = self.dut.send_expect( - "show port xstats %s" % self.dut_ports[0], "testpmd>", 60) + self.vhost_user.send_expect("clear port xstats 1", "testpmd>", 60) + """ + out = self.vhost_user.send_expect( + "show port xstats 1", "testpmd>", 60) packet = re.search("rx_%s_packets:\s*(\d*)" % scope, out) sum_packet = packet.group(1) self.verify(int(sum_packet) == 0, "Insufficient the received package") - - def test_based_types(self): """ - Verify different type of packets receiving and transmitting packets correctly in the Vhost PMD xstats - """ - self.prepare_start() - types = ['ff:ff:ff:ff:ff:ff', '01:00:00:33:00:01'] - scope = '' - for p in types: - if p == 'ff:ff:ff:ff:ff:ff': - scope = 'broadcast' - self.dmac = 'ff:ff:ff:ff:ff:ff' - elif p == '01:00:00:33:00:01': - scope = 'multicast' - self.dmac = '01:00:00:33:00:01' - self.scapy_send_packet(64, self.dmac, 10000) - self.send_verify(scope, 10000) - self.clear_port_xstats(scope) - - def test_stability(self): + + def ltest_stability(self): """ Verify stability case with multiple queues for Vhost PMD xstats Send packets for 2 minutes, check the xstats still can work correctly @@ -218,8 +297,8 @@ class TestVhostPmdXstats(TestCase): self.scapy_send_packet(64, self.dmac, 1) if date_now >= date_new: break - out_0 = self.dut.send_expect( - "show port xstats %s" % self.dut_ports[0], "testpmd>", 60) + out_0 = self.vhost_user.send_expect( + "show port xstats 1", "testpmd>", 60) rx_packet = re.search("rx_size_64_packets:\s*(\d*)", out_0) rx_packets = rx_packet.group(1) self.verify(self.scapy_num == int(rx_packets), "Error for rx_packets:%s != tx_packets :%s" % ( @@ -229,12 +308,10 @@ class TestVhostPmdXstats(TestCase): """ Run after each test case. """ - self.vm._stop_vm() - self.dut.kill_all() - time.sleep(2) + self.dut.send_expect("killall -s INT testpmd", "#") def tear_down_all(self): """ Run after each test suite. """ - self.dut.bind_interfaces_linux(nics_to_bind=self.unbind_ports) + pass \ No newline at end of file -- 2.17.1