From: Ling Wei
To: dts@dpdk.org
Cc: Ling Wei
Date: Fri, 2 Apr 2021 14:16:15 +0800
Message-Id: <20210402061615.66125-1-weix.ling@intel.com>
X-Mailer: git-send-email 2.25.1
Subject: [dts] [PATCH V1] tests/vm2vm_virtio_net_perf: add and modify test cases to sync with the test plan

1. Modify test case 8 to sync with the test plan.
2. Add test case 12 to sync with the test plan.
3. Add a bind_nic_driver method and a step in tear_down_all that binds the DUT ports back to the DPDK-compatible driver to restore the testbed.
4. Adjust code format.
Signed-off-by: Ling Wei
---
 tests/TestSuite_vm2vm_virtio_net_perf.py | 284 +++++++++++++++--------
 1 file changed, 184 insertions(+), 100 deletions(-)

diff --git a/tests/TestSuite_vm2vm_virtio_net_perf.py b/tests/TestSuite_vm2vm_virtio_net_perf.py
index b0733bc3..f7dd0d99 100644
--- a/tests/TestSuite_vm2vm_virtio_net_perf.py
+++ b/tests/TestSuite_vm2vm_virtio_net_perf.py
@@ -42,6 +42,7 @@ import re
 import time
 import string
 import random
+import utils
 from virt_common import VM
 from test_case import TestCase
 from pmd_output import PmdOutput
@@ -53,9 +54,7 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
         core_config = "1S/5C/1T"
         self.cores_list = self.dut.get_core_list(core_config, socket=self.ports_socket)
-        self.verify(len(self.cores_list) >= 4,
-                    "There has not enough cores to test this suite %s" %
-                    self.suite_name)
+        self.verify(len(self.cores_list) >= 4, "There has not enough cores to test this suite %s" % self.suite_name)
         self.vm_num = 2
         self.virtio_ip1 = "1.1.1.2"
         self.virtio_ip2 = "1.1.1.3"
@@ -68,12 +67,11 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.vhost = self.dut.new_session(suite="vhost")
         self.pmd_vhost = PmdOutput(self.dut, self.vhost)
         self.app_testpmd_path = self.dut.apps_name['test-pmd']
-        self.dut_ports = self.dut.get_ports()
-        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
         # get cbdma device
         self.cbdma_dev_infos = []
         self.dmas_info = None
         self.device_str = None
+        self.checked_vm = False
         self.dut.restore_interfaces()
 
     def set_up(self):
@@ -130,7 +128,7 @@ class TestVM2VMVirtioNetPerf(TestCase):
             self.dut.send_expect('./usertools/dpdk-devbind.py -u %s' % self.device_str, '# ', 30)
             self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=ioatdma %s' % self.device_str, '# ', 60)
 
-    def start_vhost_testpmd(self, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, used_queues=1):
+    def start_vhost_testpmd(self, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, rxq_txq=None):
         """
         launch the testpmd with different parameters
         """
@@ -156,81 +154,67 @@ class TestVM2VMVirtioNetPerf(TestCase):
         vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d%s' " % (self.base_dir, enable_queues, cbdma_arg_0)
         vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d%s' " % (self.base_dir, enable_queues, cbdma_arg_1)
         eal_params = self.dut.create_eal_parameters(cores=self.cores_list, prefix='vhost', no_pci=no_pci)
-        params = " -- -i --nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d" % (nb_cores, used_queues, used_queues)
+        if rxq_txq is None:
+            params = " -- -i --nb-cores=%d --txd=1024 --rxd=1024" % nb_cores
+        else:
+            params = " -- -i --nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d" % (nb_cores, rxq_txq, rxq_txq)
         self.command_line = testcmd + eal_params + vdev1 + vdev2 + params
         self.pmd_vhost.execute_cmd(self.command_line, timeout=30)
         self.pmd_vhost.execute_cmd('vhost enable tx all', timeout=30)
         self.pmd_vhost.execute_cmd('start', timeout=30)
 
-    def start_vms(self, path_mode, server_mode=False, opt_queue=1):
+    def start_vms(self, server_mode=False, opt_queue=None, vm_config='vhost_sample'):
         """
         start two VM, each VM has one virtio device
         """
-        if path_mode == 1:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on"
-        elif path_mode == 2:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
-        elif path_mode == 4:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
-        elif path_mode == 5:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
-        elif path_mode == 6:
-            setting_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
-        elif path_mode == 10:
-            setting_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
-        elif path_mode == 11:
-            setting_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
-
         for i in range(self.vm_num):
             vm_dut = None
-            vm_info = VM(self.dut, 'vm%d' % i, 'vm')
+            vm_info = VM(self.dut, 'vm%d' % i, vm_config)
             vm_params = {}
             vm_params['driver'] = 'vhost-user'
             if not server_mode:
                 vm_params['opt_path'] = self.base_dir + '/vhost-net%d' % i
             else:
                 vm_params['opt_path'] = self.base_dir + '/vhost-net%d' % i + ',server'
-            vm_params['opt_queue'] = opt_queue
+            if opt_queue is not None:
+                vm_params['opt_queue'] = opt_queue
             vm_params['opt_mac'] = "52:54:00:00:00:0%d" % (i+1)
-            vm_params['opt_settings'] = setting_args
+            vm_params['opt_settings'] = self.vm_args
             vm_info.set_vm_device(**vm_params)
-            time.sleep(3)
             try:
                 vm_dut = vm_info.start(set_target=False)
                 if vm_dut is None:
                     raise Exception("Set up VM ENV failed")
             except Exception as e:
-                self.logger.error("Failure for %s" % str(e))
-                raise e
-            vm_dut.restore_interfaces()
-
+                print(utils.RED("Failure for %s" % str(e)))
+            self.verify(vm_dut is not None, "start vm failed")
             self.vm_dut.append(vm_dut)
             self.vm.append(vm_info)
 
-    def config_vm_env(self, combined=False, used_queues=1):
+    def config_vm_env(self, combined=False, rxq_txq=1):
         """
         set virtio device IP and run arp protocal
         """
         vm1_intf = self.vm_dut[0].ports_info[0]['intf']
         vm2_intf = self.vm_dut[1].ports_info[0]['intf']
         if combined:
-            self.vm_dut[0].send_expect("ethtool -L %s combined %d" % (vm1_intf, used_queues), "#", 10)
+            self.vm_dut[0].send_expect("ethtool -L %s combined %d" % (vm1_intf, rxq_txq), "#", 10)
         self.vm_dut[0].send_expect("ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10)
         if combined:
-            self.vm_dut[1].send_expect("ethtool -L %s combined %d" % (vm2_intf, used_queues), "#", 10)
+            self.vm_dut[1].send_expect("ethtool -L %s combined %d" % (vm2_intf, rxq_txq), "#", 10)
         self.vm_dut[1].send_expect("ifconfig %s %s" % (vm2_intf, self.virtio_ip2), "#", 10)
         self.vm_dut[0].send_expect("arp -s %s %s" % (self.virtio_ip2, self.virtio_mac2), "#", 10)
         self.vm_dut[1].send_expect("arp -s %s %s" % (self.virtio_ip1, self.virtio_mac1), "#", 10)
 
-    def prepare_test_env(self, path_mode, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
-                         server_mode=False, opt_queue=1, combined=False, used_queues=1):
+    def prepare_test_env(self, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
+                         server_mode=False, opt_queue=None, combined=False, rxq_txq=None, vm_config='vhost_sample'):
         """
         start vhost testpmd and qemu, and config the vm env
         """
         self.start_vhost_testpmd(cbdma=cbdma, no_pci=no_pci, client_mode=client_mode, enable_queues=enable_queues,
-                                 nb_cores=nb_cores, used_queues=used_queues)
-        self.start_vms(path_mode=path_mode, server_mode=server_mode, opt_queue=opt_queue)
-        self.config_vm_env(combined=combined, used_queues=used_queues)
+                                 nb_cores=nb_cores, rxq_txq=rxq_txq)
+        self.start_vms(server_mode=server_mode, opt_queue=opt_queue, vm_config=vm_config)
+        self.config_vm_env(combined=combined, rxq_txq=rxq_txq)
 
     def start_iperf(self, iperf_mode='tso'):
         """
@@ -332,11 +316,11 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.verify(tcp6_info is not None and tcp6_info.group(1) == "on",
                     "tx-tcp6-segmentation in vm not right")
 
-    def check_scp_file_valid_between_vms(self, file_size=1024):
+    def check_scp_file_valid_between_vms(self, file_size=1):
         """
         scp file form VM1 to VM2, check the data is valid
         """
-        # default file_size=1024K
+        # default file_size=1K
         data = ''
         for char in range(file_size * 1024):
             data += random.choice(self.random_string)
@@ -353,68 +337,63 @@ class TestVM2VMVirtioNetPerf(TestCase):
         md5_revd = md5_revd[: md5_revd.find(' ')]
         self.verify(md5_send == md5_revd, 'the received file is different with send file')
 
+    def bind_nic_driver(self, ports, driver=""):
+        if driver == "igb_uio":
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                driver = netdev.get_nic_driver()
+                if driver != 'igb_uio':
+                    netdev.bind_driver(driver='igb_uio')
+        else:
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                driver_now = netdev.get_nic_driver()
+                if driver == "":
+                    driver = netdev.default_driver
+                if driver != driver_now:
+                    netdev.bind_driver(driver=driver)
+
     def test_vm2vm_split_ring_iperf_with_tso(self):
         """
         TestCase1: VM2VM split ring vhost-user/virtio-net test with tcp traffic
         """
-        self.prepare_test_env(path_mode=1, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on"
+        self.prepare_test_env(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=1, combined=False, rxq_txq=None)
         self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
 
     def test_vm2vm_split_ring_with_tso_and_cbdma_enable(self):
         """
         TestCase2: VM2VM split ring vhost-user/virtio-net CBDMA enable test with tcp traffic
         """
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on"
         self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
-        self.prepare_test_env(path_mode=1, cbdma=True, no_pci=False, client_mode=False, enable_queues=1, nb_cores=2,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=1, combined=False, rxq_txq=None)
         cbdma_value = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         expect_value = self.get_suite_cfg()['expected_throughput'][self.running_case]
         self.verify(cbdma_value > expect_value, "CBDMA enable performance: %s is lower than CBDMA disable: %s."
                     %(cbdma_value, expect_value))
 
-    def test_vm2vm_packed_ring_iperf_with_tso(self):
-        """
-        TestCase7: VM2VM packed ring vhost-user/virtio-net test with tcp traffic
-        """
-        self.prepare_test_env(path_mode=4, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
-        self.start_iperf_and_verify_vhost_xstats_info()
-
     def test_vm2vm_split_ring_iperf_with_ufo(self):
         """
         TestCase3: VM2VM split ring vhost-user/virtio-net test with udp traffic
         """
-        self.prepare_test_env(path_mode=2, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=1,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
+        self.prepare_test_env(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=1,
+                              server_mode=False, opt_queue=1, combined=False, rxq_txq=None)
        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='ufo')
 
-    def test_vm2vm_packed_ring_iperf_with_ufo(self):
-        """
-        TestCase8: VM2VM packed ring vhost-user/virtio-net test with udp traffic
-        """
-        self.prepare_test_env(path_mode=4, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
-                              server_mode=False, opt_queue=1, combined=False, used_queues=1)
-        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='other')
-
     def test_vm2vm_split_ring_device_capbility(self):
         """
         TestCase4: Check split ring virtio-net device capability
        """
-        self.start_vhost_testpmd(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, used_queues=1)
-        self.start_vms(path_mode=2)
-        self.offload_capbility_check(self.vm_dut[0])
-        self.offload_capbility_check(self.vm_dut[1])
-
-    def test_vm2vm_packed_ring_device_capbility(self):
-        """
-        TestCase9: Check packed ring virtio-net device capability
-        """
-        self.start_vhost_testpmd(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, used_queues=1)
-        self.start_vms(path_mode=4)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
+        self.start_vhost_testpmd(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, rxq_txq=None)
+        self.start_vms()
         self.offload_capbility_check(self.vm_dut[0])
         self.offload_capbility_check(self.vm_dut[1])
 
-    def test_vm2vm_split_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
+    def test_vm2vm_split_ring_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
         """
         TestCase5: VM2VM virtio-net split ring mergeable CBDMA enable test with large packet payload valid check
         """
@@ -423,24 +402,25 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
 
         self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
-        self.prepare_test_env(path_mode=5, cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
-                              server_mode=True, opt_queue=8, combined=True, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
+                              server_mode=True, opt_queue=8, combined=True, rxq_txq=8, vm_config='vm')
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Enable', 'mergeable path', 8, iperf_data_cbdma_enable_8_queue])
 
         self.logger.info("Re-launch without CBDMA and used 8 queue")
         self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=8)
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_disable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Disable','mergeable path', 8, iperf_data_cbdma_disable_8_queue])
 
         self.logger.info("Re-launch without CBDMA and used 1 queue")
         self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=1)
-        self.config_vm_env(combined=True, used_queues=1)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=1)
+        self.config_vm_env(combined=True, rxq_txq=1)
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Disable', 'mergeable path', 1, iperf_data_cbdma_disable_1_queue])
 
@@ -453,7 +433,7 @@ class TestVM2VMVirtioNetPerf(TestCase):
                     "CMDMA enable: %s is lower than CBDMA disable: %s" % (
                         iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue))
 
-    def test_vm2vm_split_ring_with_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
+    def test_vm2vm_split_ring_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
         """
         TestCase6: VM2VM virtio-net split ring non-mergeable CBDMA enable test with large packet payload valid check
         """
@@ -462,24 +442,25 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
 
         self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
-        self.prepare_test_env(path_mode=6, cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
-                              server_mode=True, opt_queue=8, combined=True, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
+                              server_mode=True, opt_queue=8, combined=True, rxq_txq=8, vm_config='vm')
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Enable', 'no-mergeable path', 8, iperf_data_cbdma_enable_8_queue])
 
         self.logger.info("Re-launch without CBDMA and used 8 queue")
         self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=8)
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_disable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Disable','no-mergeable path', 8, iperf_data_cbdma_disable_8_queue])
 
         self.logger.info("Re-launch without CBDMA and used 1 queue")
         self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=1)
-        self.config_vm_env(combined=True, used_queues=1)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=1)
+        self.config_vm_env(combined=True, rxq_txq=1)
+        self.check_scp_file_valid_between_vms()
         iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
         ipef_result.append(['Disable','no-mergeable path', 1, iperf_data_cbdma_disable_1_queue])
 
@@ -492,21 +473,123 @@ class TestVM2VMVirtioNetPerf(TestCase):
                     "CMDMA enable: %s is lower than CBDMA disable: %s" % (
                         iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue))
 
+    def test_vm2vm_packed_ring_iperf_with_tso(self):
+        """
+        TestCase7: VM2VM packed ring vhost-user/virtio-net test with tcp traffic
+        """
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
+        self.prepare_test_env(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=1, combined=False, rxq_txq=None)
+        self.start_iperf_and_verify_vhost_xstats_info()
+
+    def test_vm2vm_packed_ring_iperf_with_tso_and_cbdma_enable(self):
+        """
+        TestCase8: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic
+        """
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=None, combined=False, rxq_txq=None)
+        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='other')
+
+    def test_vm2vm_packed_ring_device_capbility(self):
+        """
+        TestCase9: Check packed ring virtio-net device capability
+        """
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
+        self.prepare_test_env(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,
+                              server_mode=False, opt_queue=None, combined=False, rxq_txq=None)
+        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='ufo')
+
     def test_vm2vm_packed_ring_mergeable_path_check_large_packet(self):
         """
         TestCase10: VM2VM packed ring virtio-net mergeable with large packet payload valid check
         """
-        self.prepare_test_env(path_mode=10, cbdma=False, no_pci=True, client_mode=True, enable_queues=8, nb_cores=4,
-                              server_mode=True, opt_queue=8, combined=True, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
+        self.start_vhost_testpmd(cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, rxq_txq=None)
+        self.start_vms()
+        self.offload_capbility_check(self.vm_dut[0])
+        self.offload_capbility_check(self.vm_dut[1])
+
+    def test_vm2vm_packed_ring_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
+        """
+        Test Case 11: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable test with large packet payload valid check
+        """
+        # This test case need to use QEMU 3.0 to test
+        ipef_result = []
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+
+        self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
+        self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
+                              server_mode=True, opt_queue=8, combined=True, rxq_txq=8, vm_config='vm')
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Enable', 'mergeable path', 8, iperf_data_cbdma_enable_8_queue])
+
+        self.logger.info("Re-launch without CBDMA and used 8 queue")
+        self.vhost.send_expect("quit", "# ", 30)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=8)
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_disable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Disable', 'mergeable path', 8, iperf_data_cbdma_disable_8_queue])
+
+        self.logger.info("Re-launch without CBDMA and used 1 queue")
+        self.vhost.send_expect("quit", "# ", 30)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=1)
+        self.config_vm_env(combined=True, rxq_txq=1)
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Disable', 'mergeable path', 1, iperf_data_cbdma_disable_1_queue])
+
+        self.table_header = ['CBDMA Enable/Disable', 'Mode', 'rxq/txq', 'Gbits/sec']
+        self.result_table_create(self.table_header)
+        for table_row in ipef_result:
+            self.result_table_add(table_row)
+        self.result_table_print()
+        self.verify(iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_8_queue, \
+                    "CMDMA enable: %s is lower than CBDMA disable: %s" % (
+                        iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue))
 
-    def test_vm2vm_packed_ring_no_mergeable_path_check_large_packet(self):
+    def test_vm2vm_packed_ring_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(self):
         """
-        TestCase11: VM2VM packed ring virtio-net non-mergeable with large packet payload valid check
+        Test Case 12: VM2VM virtio-net packed ring non-mergeable 8 queues CBDMA enable test with large packet payload valid check
         """
-        self.prepare_test_env(path_mode=11, cbdma=False, no_pci=True, client_mode=True, enable_queues=8, nb_cores=4,
-                              server_mode=True, opt_queue=8, combined=True, used_queues=8)
-        self.check_scp_file_valid_between_vms(file_size=1024)
+        # This test case need to use QEMU 3.0 to test
+        ipef_result = []
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+
+        self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
+        self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
+        self.prepare_test_env(cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,
+                              server_mode=True, opt_queue=8, combined=True, rxq_txq=8, vm_config='vm')
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Enable', 'mergeable path', 8, iperf_data_cbdma_enable_8_queue])
+
+        self.logger.info("Re-launch without CBDMA and used 8 queue")
+        self.vhost.send_expect("quit", "# ", 30)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=8)
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_disable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Disable', 'mergeable path', 8, iperf_data_cbdma_disable_8_queue])
+
+        self.logger.info("Re-launch without CBDMA and used 1 queue")
+        self.vhost.send_expect("quit", "# ", 30)
+        self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, rxq_txq=1)
+        self.config_vm_env(combined=True, rxq_txq=1)
+        self.check_scp_file_valid_between_vms()
+        iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')
+        ipef_result.append(['Disable', 'mergeable path', 1, iperf_data_cbdma_disable_1_queue])
+
+        self.table_header = ['CBDMA Enable/Disable', 'Mode', 'rxq/txq', 'Gbits/sec']
+        self.result_table_create(self.table_header)
+        for table_row in ipef_result:
+            self.result_table_add(table_row)
+        self.result_table_print()
+        self.verify(iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_8_queue, \
+                    "CMDMA enable: %s is lower than CBDMA disable: %s" % (
+                        iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue))
 
     def tear_down(self):
         """
@@ -520,5 +603,6 @@ class TestVM2VMVirtioNetPerf(TestCase):
         """
         Run after each test suite.
         """
+        self.bind_nic_driver(self.dut_ports, self.drivername)
         if getattr(self, 'vhost', None):
             self.dut.close_session(self.vhost)
-- 
2.25.1