From: Wei Ling
To: dts@dpdk.org
Cc: Wei Ling
Subject: [dts][PATCH V1 2/5] tests/vm2vm_virtio_net_perf: delete CBDMA test case
Date: Wed, 6 Apr 2022 17:09:58 +0800
Message-Id: <20220406090958.28325-1-weix.ling@intel.com>

As per commit 53d3f4778c ("vhost: integrate dmadev in asynchronous data-path"), delete the CBDMA-related cases from tests/vm2vm_virtio_net_perf.

Signed-off-by: Wei Ling
---
 tests/TestSuite_vm2vm_virtio_net_perf.py | 793 ++---------------------
 1 file changed, 44 insertions(+), 749 deletions(-)

diff --git a/tests/TestSuite_vm2vm_virtio_net_perf.py b/tests/TestSuite_vm2vm_virtio_net_perf.py
index 486f1acf..8c234c24 100644
--- a/tests/TestSuite_vm2vm_virtio_net_perf.py
+++ b/tests/TestSuite_vm2vm_virtio_net_perf.py
@@ -38,7 +38,6 @@ vm2vm split ring and packed ring vhost-user/virtio-net check the payload of larg
 mergeable and non-mergeable dequeue zero copy.
 please use qemu version greater 4.1.94 which support packed feathur to test this suite.
""" -import random import re import string import time @@ -71,12 +70,6 @@ class TestVM2VMVirtioNetPerf(TestCase): self.vhost = self.dut.new_session(suite="vhost") self.pmd_vhost = PmdOutput(self.dut, self.vhost) self.app_testpmd_path = self.dut.apps_name["test-pmd"] - # get cbdma device - self.cbdma_dev_infos = [] - self.dmas_info = None - self.device_str = None - self.checked_vm = False - self.dut.restore_interfaces() def set_up(self): """ @@ -86,158 +79,29 @@ class TestVM2VMVirtioNetPerf(TestCase): self.vm_dut = [] self.vm = [] - def get_cbdma_ports_info_and_bind_to_dpdk( - self, cbdma_num=2, allow_diff_socket=False - ): - """ - get all cbdma ports - """ - out = self.dut.send_expect( - "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 - ) - device_info = out.split("\n") - for device in device_info: - pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device) - if pci_info is not None: - dev_info = pci_info.group(1) - # the numa id of ioat dev, only add the device which on same socket with nic dev - bus = int(dev_info[5:7], base=16) - if bus >= 128: - cur_socket = 1 - else: - cur_socket = 0 - if allow_diff_socket: - self.cbdma_dev_infos.append(pci_info.group(1)) - else: - if self.ports_socket == cur_socket: - self.cbdma_dev_infos.append(pci_info.group(1)) - self.verify( - len(self.cbdma_dev_infos) >= cbdma_num, - "There no enough cbdma device to run this suite", - ) - used_cbdma = self.cbdma_dev_infos[0:cbdma_num] - dmas_info = "" - for dmas in used_cbdma[0 : int(cbdma_num / 2)]: - number = used_cbdma[0 : int(cbdma_num / 2)].index(dmas) - dmas = "txq{}@{},".format(number, dmas) - dmas_info += dmas - for dmas in used_cbdma[int(cbdma_num / 2) :]: - number = used_cbdma[int(cbdma_num / 2) :].index(dmas) - dmas = "txq{}@{},".format(number, dmas) - dmas_info += dmas - self.dmas_info = dmas_info[:-1] - self.device_str = " ".join(used_cbdma) - self.dut.send_expect( - "./usertools/dpdk-devbind.py --force --bind=%s %s" - % (self.drivername, self.device_str), - "# ", - 60, - ) - - def bind_cbdma_device_to_kernel(self): - if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( - "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 - ) - self.dut.send_expect( - "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" - % self.device_str, - "# ", - 60, - ) - - @property - def check_2m_env(self): - out = self.dut.send_expect( - "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " - ) - return True if out == "2048" else False - def start_vhost_testpmd( self, - cbdma=False, no_pci=True, - client_mode=False, - enable_queues=1, - nb_cores=2, - rxq_txq=None, - exchange_cbdma=False, - iova_mode="", ): """ launch the testpmd with different parameters """ - if cbdma is True: - dmas_info_list = self.dmas_info.split(",") - cbdma_arg_0_list = [] - cbdma_arg_1_list = [] - for item in dmas_info_list: - if dmas_info_list.index(item) < int(len(dmas_info_list) / 2): - cbdma_arg_0_list.append(item) - else: - cbdma_arg_1_list.append(item) - cbdma_arg_0 = ",dmas=[{}]".format(";".join(cbdma_arg_0_list)) - cbdma_arg_1 = ",dmas=[{}]".format(";".join(cbdma_arg_1_list)) - else: - cbdma_arg_0 = "" - cbdma_arg_1 = "" testcmd = self.app_testpmd_path + " " - if not client_mode: - vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,queues=%d%s' " % ( - self.base_dir, - enable_queues, - cbdma_arg_0, - ) - vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,queues=%d%s' " % ( - self.base_dir, - enable_queues, - cbdma_arg_1, - ) - else: - vdev1 = "--vdev 
'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d%s' " % ( - self.base_dir, - enable_queues, - cbdma_arg_0, - ) - vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d%s' " % ( - self.base_dir, - enable_queues, - cbdma_arg_1, - ) - if exchange_cbdma: - vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d%s' " % ( - self.base_dir, - enable_queues, - cbdma_arg_1, - ) - vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d%s' " % ( - self.base_dir, - enable_queues, - cbdma_arg_0, - ) - + vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,queues=1' " % ( + self.base_dir + ) + vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,queues=1' " % ( + self.base_dir + ) eal_params = self.dut.create_eal_parameters( cores=self.cores_list, prefix="vhost", no_pci=no_pci ) - if rxq_txq is None: - params = " -- -i --nb-cores=%d --txd=1024 --rxd=1024" % nb_cores - else: - params = " -- -i --nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d" % ( - nb_cores, - rxq_txq, - rxq_txq, - ) - if iova_mode: - iova_parm = " --iova=" + iova_mode - else: - iova_parm = "" - self.command_line = testcmd + eal_params + vdev1 + vdev2 + iova_parm + params + params = " -- -i --nb-cores=2 --txd=1024 --rxd=1024" + self.command_line = testcmd + eal_params + vdev1 + vdev2 + params self.pmd_vhost.execute_cmd(self.command_line, timeout=30) - self.pmd_vhost.execute_cmd("vhost enable tx all", timeout=30) self.pmd_vhost.execute_cmd("start", timeout=30) - def start_vms(self, server_mode=False, opt_queue=None, vm_config="vhost_sample"): + def start_vms(self, vm_config="vhost_sample"): """ start two VM, each VM has one virtio device """ @@ -246,12 +110,7 @@ class TestVM2VMVirtioNetPerf(TestCase): vm_info = VM(self.dut, "vm%d" % i, vm_config) vm_params = {} vm_params["driver"] = "vhost-user" - if not server_mode: - vm_params["opt_path"] = self.base_dir + "/vhost-net%d" % i - else: - vm_params["opt_path"] = self.base_dir + "/vhost-net%d" % i + ",server" - if opt_queue is not None: - vm_params["opt_queue"] = opt_queue + vm_params["opt_path"] = self.base_dir + "/vhost-net%d" % i vm_params["opt_mac"] = "52:54:00:00:00:0%d" % (i + 1) vm_params["opt_settings"] = self.vm_args vm_info.set_vm_device(**vm_params) @@ -265,23 +124,15 @@ class TestVM2VMVirtioNetPerf(TestCase): self.vm_dut.append(vm_dut) self.vm.append(vm_info) - def config_vm_env(self, combined=False, rxq_txq=1): + def config_vm_env(self): """ set virtio device IP and run arp protocal """ vm1_intf = self.vm_dut[0].ports_info[0]["intf"] vm2_intf = self.vm_dut[1].ports_info[0]["intf"] - if combined: - self.vm_dut[0].send_expect( - "ethtool -L %s combined %d" % (vm1_intf, rxq_txq), "#", 10 - ) self.vm_dut[0].send_expect( "ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10 ) - if combined: - self.vm_dut[1].send_expect( - "ethtool -L %s combined %d" % (vm2_intf, rxq_txq), "#", 10 - ) self.vm_dut[1].send_expect( "ifconfig %s %s" % (vm2_intf, self.virtio_ip2), "#", 10 ) @@ -292,87 +143,22 @@ class TestVM2VMVirtioNetPerf(TestCase): "arp -s %s %s" % (self.virtio_ip1, self.virtio_mac1), "#", 10 ) - def prepare_test_env( - self, - cbdma=False, - no_pci=True, - client_mode=False, - enable_queues=1, - nb_cores=2, - server_mode=False, - opt_queue=None, - combined=False, - rxq_txq=None, - iova_mode="", - ): - """ - start vhost testpmd and qemu, and config the vm env - """ - self.start_vhost_testpmd( - cbdma=cbdma, - no_pci=no_pci, - client_mode=client_mode, - enable_queues=enable_queues, - nb_cores=nb_cores, - rxq_txq=rxq_txq, - iova_mode=iova_mode, - ) - 
self.start_vms(server_mode=server_mode, opt_queue=opt_queue) - self.config_vm_env(combined=combined, rxq_txq=rxq_txq) - def start_iperf(self, iperf_mode="tso"): """ run perf command between to vms """ # clear the port xstats before iperf self.vhost.send_expect("clear port xstats all", "testpmd> ", 10) - - # add -f g param, use Gbits/sec report teste result if iperf_mode == "tso": - iperf_server = "iperf -s -i 1" - iperf_client = "iperf -c 1.1.1.2 -i 1 -t 60" + server = "iperf -s -i 1" + client = "iperf -c 1.1.1.2 -i 1 -t 60" elif iperf_mode == "ufo": - iperf_server = "iperf -s -u -i 1" - iperf_client = "iperf -c 1.1.1.2 -i 1 -t 30 -P 4 -u -b 1G -l 9000" - self.vm_dut[0].send_expect("%s > iperf_server.log &" % iperf_server, "", 10) - self.vm_dut[1].send_expect("%s > iperf_client.log &" % iperf_client, "", 60) + server = "iperf -s -u -i 1" + client = "iperf -c 1.1.1.2 -i 1 -t 60 -P 4 -u -b 1G -l 9000" + self.vm_dut[0].send_expect("%s > iperf_server.log &" % server, "", 10) + self.vm_dut[1].send_expect("%s > iperf_client.log &" % client, "", 10) time.sleep(90) - def get_perf_result(self): - """ - get the iperf test result - """ - self.table_header = ["Mode", "[M|G]bits/sec"] - self.result_table_create(self.table_header) - self.vm_dut[0].send_expect("pkill iperf", "# ") - self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir) - fp = open("./iperf_client.log") - fmsg = fp.read() - fp.close() - # remove the server report info from msg - index = fmsg.find("Server Report") - if index != -1: - fmsg = fmsg[:index] - iperfdata = re.compile("\S*\s*[M|G]bits/sec").findall(fmsg) - # the last data of iperf is the ave data from 0-30 sec - self.verify(len(iperfdata) != 0, "The iperf data between to vms is 0") - self.verify( - (iperfdata[-1]).split()[1] == "Gbits/sec", - "The iperf data is %s,Can't reach Gbits/sec" % iperfdata[-1], - ) - self.logger.info("The iperf data between vms is %s" % iperfdata[-1]) - - # put the result to table - results_row = ["vm2vm", iperfdata[-1]] - self.result_table_add(results_row) - - # print iperf resut - self.result_table_print() - # rm the iperf log file in vm - self.vm_dut[0].send_expect("rm iperf_server.log", "#", 10) - self.vm_dut[1].send_expect("rm iperf_client.log", "#", 10) - return float(iperfdata[-1].split()[0]) - def verify_xstats_info_on_vhost(self): """ check both 2VMs can receive and send big packets to each other @@ -390,16 +176,6 @@ class TestVM2VMVirtioNetPerf(TestCase): int(tx_info.group(1)) > 0, "Port 0 not forward packet greater than 1522" ) - def start_iperf_and_verify_vhost_xstats_info(self, iperf_mode="tso"): - """ - start to send packets and verify vm can received data of iperf - and verify the vhost can received big pkts in testpmd - """ - self.start_iperf(iperf_mode) - iperfdata = self.get_perf_result() - self.verify_xstats_info_on_vhost() - return iperfdata - def stop_all_apps(self): for i in range(len(self.vm)): self.vm[i].stop() @@ -434,557 +210,76 @@ class TestVM2VMVirtioNetPerf(TestCase): "tx-tcp6-segmentation in vm not right", ) - def check_scp_file_valid_between_vms(self, file_size=1024): - """ - scp file form VM1 to VM2, check the data is valid - """ - # default file_size=1024K - data = "" - for char in range(file_size * 1024): - data += random.choice(self.random_string) - self.vm_dut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ") - # scp this file to vm1 - out = self.vm_dut[1].send_command( - "scp root@%s:/tmp/payload /root" % self.virtio_ip1, timeout=5 - ) - if "Are you sure you want to continue 
connecting" in out: - self.vm_dut[1].send_command("yes", timeout=3) - self.vm_dut[1].send_command(self.vm[0].password, timeout=3) - # get the file info in vm1, and check it valid - md5_send = self.vm_dut[0].send_expect("md5sum /tmp/payload", "# ") - md5_revd = self.vm_dut[1].send_expect("md5sum /root/payload", "# ") - md5_send = md5_send[: md5_send.find(" ")] - md5_revd = md5_revd[: md5_revd.find(" ")] - self.verify( - md5_send == md5_revd, "the received file is different with send file" - ) - def test_vm2vm_split_ring_iperf_with_tso(self): """ TestCase1: VM2VM split ring vhost-user/virtio-net test with tcp traffic """ self.vm_args = "disable-modern=false,mrg_rxbuf=off,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on" - self.prepare_test_env( - cbdma=False, - no_pci=True, - client_mode=False, - enable_queues=1, - nb_cores=2, - server_mode=False, - opt_queue=1, - combined=False, - rxq_txq=None, - ) - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - - def test_vm2vm_split_ring_with_tso_and_cbdma_enable(self): - """ - TestCase2: VM2VM split ring vhost-user/virtio-net CBDMA enable test with tcp traffic - """ - self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on" - self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2) - self.prepare_test_env( - cbdma=True, - no_pci=False, - client_mode=False, - enable_queues=1, - nb_cores=2, - server_mode=False, - opt_queue=1, - combined=False, - rxq_txq=None, - ) - cbdma_value = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - expect_value = self.get_suite_cfg()["expected_throughput"][ - "test_vm2vm_split_ring_iperf_with_tso" - ] - self.verify( - cbdma_value > expect_value, - "CBDMA enable performance: %s is lower than CBDMA disable: %s." 
- % (cbdma_value, expect_value), - ) + self.start_vhost_testpmd() + self.start_vms() + self.config_vm_env() + self.start_iperf(iperf_mode='tso') + self.verify_xstats_info_on_vhost() def test_vm2vm_split_ring_iperf_with_ufo(self): """ - TestCase3: VM2VM split ring vhost-user/virtio-net test with udp traffic + TestCase2: VM2VM split ring vhost-user/virtio-net test with udp traffic """ self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on" - self.prepare_test_env( - cbdma=False, - no_pci=True, - client_mode=False, - enable_queues=1, - nb_cores=1, - server_mode=False, - opt_queue=1, - combined=False, - rxq_txq=None, - ) - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="ufo") + self.start_vhost_testpmd() + self.start_vms() + self.config_vm_env() + self.start_iperf(iperf_mode='ufo') + self.verify_xstats_info_on_vhost() def test_vm2vm_split_ring_device_capbility(self): """ - TestCase4: Check split ring virtio-net device capability + TestCase3: Check split ring virtio-net device capability """ self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on" - self.start_vhost_testpmd( - cbdma=False, - no_pci=True, - client_mode=False, - enable_queues=1, - nb_cores=2, - rxq_txq=None, - ) + self.start_vhost_testpmd() self.start_vms() self.offload_capbility_check(self.vm_dut[0]) self.offload_capbility_check(self.vm_dut[1]) - def test_vm2vm_split_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue( - self, - ): - """ - TestCase5: VM2VM virtio-net split ring mergeable CBDMA enable test with large packet payload valid check - """ - ipef_result = [] - self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) - - self.logger.info("Launch vhost with CBDMA and with 8 queue with VA mode") - self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on" - self.prepare_test_env( - cbdma=True, - no_pci=False, - client_mode=True, - enable_queues=8, - nb_cores=4, - server_mode=True, - opt_queue=8, - combined=True, - rxq_txq=8, - iova_mode="va", - ) - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info( - iperf_mode="tso" - ) - ipef_result.append( - [ - "Enable", - "mergeable path with VA mode", - 8, - iperf_data_cbdma_enable_8_queue, - ] - ) - - self.logger.info("Re-launch and exchange CBDMA and with 8 queue with VA mode") - self.vhost.send_expect("quit", "# ", 30) - self.start_vhost_testpmd( - cbdma=True, - no_pci=False, - client_mode=True, - enable_queues=8, - nb_cores=4, - rxq_txq=8, - exchange_cbdma=True, - iova_mode="va", - ) - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_enable_8_queue_exchange = ( - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - ) - ipef_result.append( - [ - "Disable", - "mergeable path exchange CBDMA with VA mode", - 8, - iperf_data_cbdma_enable_8_queue_exchange, - ] - ) - - # This test step need to test on 1G guest hugepage ENV. 
- if not self.check_2m_env: - self.logger.info( - "Re-launch and exchange CBDMA and with 8 queue with PA mode" - ) - self.vhost.send_expect("quit", "# ", 30) - self.start_vhost_testpmd( - cbdma=True, - no_pci=False, - client_mode=True, - enable_queues=8, - nb_cores=4, - rxq_txq=8, - exchange_cbdma=True, - iova_mode="pa", - ) - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_enable_8_queue_exchange_pa = ( - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - ) - ipef_result.append( - [ - "Disable", - "mergeable path exchange CBDMA with PA mode", - 8, - iperf_data_cbdma_enable_8_queue_exchange_pa, - ] - ) - - self.logger.info("Re-launch without CBDMA and with 4 queue") - self.vhost.send_expect("quit", "# ", 30) - self.start_vhost_testpmd( - cbdma=False, - no_pci=False, - client_mode=True, - enable_queues=4, - nb_cores=4, - rxq_txq=4, - ) - self.config_vm_env(combined=True, rxq_txq=4) - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_disable_4_queue = ( - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - ) - ipef_result.append( - [ - "Disable", - "mergeable path without CBDMA with 4 queue", - 4, - iperf_data_cbdma_disable_4_queue, - ] - ) - - self.logger.info("Re-launch without CBDMA and with 1 queue") - self.vhost.send_expect("quit", "# ", 30) - self.start_vhost_testpmd( - cbdma=False, - no_pci=False, - client_mode=True, - enable_queues=4, - nb_cores=4, - rxq_txq=1, - ) - self.config_vm_env(combined=True, rxq_txq=1) - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_disable_1_queue = ( - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - ) - ipef_result.append( - [ - "Disable", - "mergeable path without CBDMA with 1 queue", - 1, - iperf_data_cbdma_disable_1_queue, - ] - ) - - self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"] - self.result_table_create(self.table_header) - for table_row in ipef_result: - self.result_table_add(table_row) - self.result_table_print() - self.verify( - iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_4_queue, - "CMDMA enable: %s is lower than CBDMA disable: %s" - % (iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_4_queue), - ) - - def test_vm2vm_split_ring_with_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue( - self, - ): - """ - TestCase6: VM2VM virtio-net split ring non-mergeable CBDMA enable test with large packet payload valid check - """ - ipef_result = [] - self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) - - self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue") - self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on" - self.prepare_test_env( - cbdma=True, - no_pci=False, - client_mode=True, - enable_queues=8, - nb_cores=4, - server_mode=True, - opt_queue=8, - combined=True, - rxq_txq=8, - ) - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info( - iperf_mode="tso" - ) - ipef_result.append( - ["Enable", "no-mergeable path", 8, iperf_data_cbdma_enable_8_queue] - ) - - self.logger.info("Re-launch without CBDMA and used 8 queue") - self.vhost.send_expect("quit", "# ", 30) - self.start_vhost_testpmd( - cbdma=False, - no_pci=False, - client_mode=True, - enable_queues=8, - nb_cores=4, - rxq_txq=8, - ) - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_disable_8_queue = ( - 
self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - ) - ipef_result.append( - ["Disable", "no-mergeable path", 8, iperf_data_cbdma_disable_8_queue] - ) - - self.logger.info("Re-launch without CBDMA and used 1 queue") - self.vhost.send_expect("quit", "# ", 30) - self.start_vhost_testpmd( - cbdma=False, - no_pci=False, - client_mode=True, - enable_queues=8, - nb_cores=4, - rxq_txq=1, - ) - self.config_vm_env(combined=True, rxq_txq=1) - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_disable_1_queue = ( - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - ) - ipef_result.append( - ["Disable", "no-mergeable path", 1, iperf_data_cbdma_disable_1_queue] - ) - - self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"] - self.result_table_create(self.table_header) - for table_row in ipef_result: - self.result_table_add(table_row) - self.result_table_print() - self.verify( - iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_8_queue, - "CMDMA enable: %s is lower than CBDMA disable: %s" - % (iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue), - ) - def test_vm2vm_packed_ring_iperf_with_tso(self): """ - TestCase7: VM2VM packed ring vhost-user/virtio-net test with tcp traffic - """ - self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on" - self.prepare_test_env( - cbdma=False, - no_pci=True, - client_mode=False, - enable_queues=1, - nb_cores=2, - server_mode=False, - opt_queue=1, - combined=False, - rxq_txq=None, - ) - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - - def test_vm2vm_packed_ring_iperf_with_tso_and_cbdma_enable(self): - """ - TestCase8: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic + TestCase4: VM2VM packed ring vhost-user/virtio-net test with tcp traffic """ - self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2) self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on" - self.prepare_test_env( - cbdma=True, - no_pci=False, - client_mode=False, - enable_queues=1, - nb_cores=2, - server_mode=False, - opt_queue=None, - combined=False, - rxq_txq=None, - ) - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") + self.start_vhost_testpmd() + self.start_vms() + self.config_vm_env() + self.start_iperf(iperf_mode='tso') + self.verify_xstats_info_on_vhost() def test_vm2vm_packed_ring_iperf_with_ufo(self): """ - Test Case 9: VM2VM packed ring vhost-user/virtio-net test with udp trafficc + Test Case 5: VM2VM packed ring vhost-user/virtio-net test with udp trafficc """ self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on" - self.prepare_test_env( - cbdma=False, - no_pci=True, - client_mode=False, - enable_queues=1, - nb_cores=2, - server_mode=False, - opt_queue=None, - combined=False, - rxq_txq=None, - ) - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="ufo") + self.start_vhost_testpmd() + self.start_vms() + self.config_vm_env() + self.start_iperf(iperf_mode='ufo') + self.verify_xstats_info_on_vhost() def test_vm2vm_packed_ring_device_capbility(self): """ - Test Case 10: Check packed ring virtio-net device capability + Test Case 6: Check packed ring virtio-net device capability """ self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on" - self.start_vhost_testpmd( - cbdma=False, - 
no_pci=True, - client_mode=False, - enable_queues=1, - nb_cores=2, - rxq_txq=None, - ) + self.start_vhost_testpmd() self.start_vms() self.offload_capbility_check(self.vm_dut[0]) self.offload_capbility_check(self.vm_dut[1]) - def test_vm2vm_packed_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue( - self, - ): - """ - Test Case 11: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable test with large packet payload valid check - """ - ipef_result = [] - self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) - - self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue") - self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on" - self.prepare_test_env( - cbdma=True, - no_pci=False, - client_mode=False, - enable_queues=8, - nb_cores=4, - server_mode=False, - opt_queue=8, - combined=True, - rxq_txq=8, - ) - for i in range(0, 5): - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_enable_8_queue = ( - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - ) - ipef_result.append( - ["Enable_%d" % i, "mergeable path", 8, iperf_data_cbdma_enable_8_queue] - ) - self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"] - self.result_table_create(self.table_header) - for table_row in ipef_result: - self.result_table_add(table_row) - self.result_table_print() - - def test_vm2vm_packed_ring_with_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue( - self, - ): - """ - Test Case 12: VM2VM virtio-net packed ring non-mergeable 8 queues CBDMA enable test with large packet payload valid check - """ - ipef_result = [] - self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) - - self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue") - self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on" - self.prepare_test_env( - cbdma=True, - no_pci=False, - client_mode=False, - enable_queues=8, - nb_cores=4, - server_mode=False, - opt_queue=8, - combined=True, - rxq_txq=8, - ) - for i in range(0, 5): - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_enable_8_queue = ( - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - ) - ipef_result.append( - ["Enable", "mergeable path", 8, iperf_data_cbdma_enable_8_queue] - ) - self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"] - self.result_table_create(self.table_header) - for table_row in ipef_result: - self.result_table_add(table_row) - self.result_table_print() - - def test_vm2vm_packed_ring_with_tso_and_cbdma_enable_iova_pa(self): - """ - Test Case 13: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic when set iova=pa - """ - # This test case need to test on 1G guest hugepage ENV. 
- self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on" - self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2) - self.prepare_test_env( - cbdma=True, - no_pci=False, - client_mode=False, - enable_queues=1, - nb_cores=2, - server_mode=False, - opt_queue=1, - combined=False, - rxq_txq=None, - iova_mode="pa", - ) - self.check_scp_file_valid_between_vms() - cbdma_value = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - expect_value = self.get_suite_cfg()["expected_throughput"][ - "test_vm2vm_split_ring_iperf_with_tso" - ] - self.verify( - cbdma_value > expect_value, - "CBDMA enable performance: %s is lower than CBDMA disable: %s." - % (cbdma_value, expect_value), - ) - - def test_vm2vm_packed_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue_iova_pa( - self, - ): - """ - Test Case 14: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable and PA mode test with large packet payload valid check - """ - # This test case need to test on 1G guest hugepage ENV. - ipef_result = [] - self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) - - self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue") - self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on" - self.prepare_test_env( - cbdma=True, - no_pci=False, - client_mode=False, - enable_queues=8, - nb_cores=4, - server_mode=False, - opt_queue=8, - combined=True, - rxq_txq=8, - iova_mode="pa", - ) - for i in range(0, 5): - self.check_scp_file_valid_between_vms() - iperf_data_cbdma_enable_8_queue = ( - self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso") - ) - ipef_result.append( - ["Enable_%d" % i, "mergeable path", 8, iperf_data_cbdma_enable_8_queue] - ) - self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"] - self.result_table_create(self.table_header) - for table_row in ipef_result: - self.result_table_add(table_row) - self.result_table_print() - def tear_down(self): """ run after each test case. """ self.stop_all_apps() self.dut.kill_all() - self.bind_cbdma_device_to_kernel() def tear_down_all(self): """ -- 2.25.1