From: Wei Ling <weix.ling@intel.com>
To: dts@dpdk.org
Cc: Wei Ling <weix.ling@intel.com>
Subject: [dts][PATCH V1 2/2] tests/vm2vm_virtio_net_perf: delete CBDMA related testcases and code
Date: Thu, 19 May 2022 03:38:29 -0400
Message-Id: <20220519073829.2814999-1-weix.ling@intel.com>

Delete CBDMA related testcases and code.

Signed-off-by: Wei Ling <weix.ling@intel.com>
---
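Note for reviewers (illustrative only; kept below the "---" so it stays out
of the commit message): the deleted get_cbdma_ports_info_and_bind_to_dpdk()
helper and the cbdma/iova_mode arguments existed only to build the vhost
vdev "dmas" strings, which none of the remaining test cases use. A minimal
sketch of the strings involved, with hypothetical PCI addresses:

    # dmas_info as the removed helper built it for cbdma_num=2, i.e.
    # "txq{N}@{pci}" entries joined by "," (N restarts in each half):
    dmas_info = "txq0@0000:00:04.0,txq0@0000:00:04.1"
    # start_vhost_testpmd() split the list in half and folded each half,
    # ";"-joined, into one vdev as a dmas=[...] suffix, e.g.:
    # --vdev 'net_vhost0,iface=<base_dir>/vhost-net0,queues=1,dmas=[txq0@0000:00:04.0]'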
 tests/TestSuite_vm2vm_virtio_net_perf.py | 534 +----------------------
 1 file changed, 13 insertions(+), 521 deletions(-)

diff --git a/tests/TestSuite_vm2vm_virtio_net_perf.py b/tests/TestSuite_vm2vm_virtio_net_perf.py
index 486f1acf..12984b89 100644
--- a/tests/TestSuite_vm2vm_virtio_net_perf.py
+++ b/tests/TestSuite_vm2vm_virtio_net_perf.py
@@ -86,67 +86,6 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.vm_dut = []
         self.vm = []

-    def get_cbdma_ports_info_and_bind_to_dpdk(
-        self, cbdma_num=2, allow_diff_socket=False
-    ):
-        """
-        get all cbdma ports
-        """
-        out = self.dut.send_expect(
-            "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
-        )
-        device_info = out.split("\n")
-        for device in device_info:
-            pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
-            if pci_info is not None:
-                dev_info = pci_info.group(1)
-                # the numa id of ioat dev, only add the device which on same socket with nic dev
-                bus = int(dev_info[5:7], base=16)
-                if bus >= 128:
-                    cur_socket = 1
-                else:
-                    cur_socket = 0
-                if allow_diff_socket:
-                    self.cbdma_dev_infos.append(pci_info.group(1))
-                else:
-                    if self.ports_socket == cur_socket:
-                        self.cbdma_dev_infos.append(pci_info.group(1))
-        self.verify(
-            len(self.cbdma_dev_infos) >= cbdma_num,
-            "There no enough cbdma device to run this suite",
-        )
-        used_cbdma = self.cbdma_dev_infos[0:cbdma_num]
-        dmas_info = ""
-        for dmas in used_cbdma[0 : int(cbdma_num / 2)]:
-            number = used_cbdma[0 : int(cbdma_num / 2)].index(dmas)
-            dmas = "txq{}@{},".format(number, dmas)
-            dmas_info += dmas
-        for dmas in used_cbdma[int(cbdma_num / 2) :]:
-            number = used_cbdma[int(cbdma_num / 2) :].index(dmas)
-            dmas = "txq{}@{},".format(number, dmas)
-            dmas_info += dmas
-        self.dmas_info = dmas_info[:-1]
-        self.device_str = " ".join(used_cbdma)
-        self.dut.send_expect(
-            "./usertools/dpdk-devbind.py --force --bind=%s %s"
-            % (self.drivername, self.device_str),
-            "# ",
-            60,
-        )
-
-    def bind_cbdma_device_to_kernel(self):
-        if self.device_str is not None:
-            self.dut.send_expect("modprobe ioatdma", "# ")
-            self.dut.send_expect(
-                "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30
-            )
-            self.dut.send_expect(
-                "./usertools/dpdk-devbind.py --force --bind=ioatdma %s"
-                % self.device_str,
-                "# ",
-                60,
-            )
-
     @property
     def check_2m_env(self):
         out = self.dut.send_expect(
@@ -156,65 +95,43 @@ class TestVM2VMVirtioNetPerf(TestCase):

     def start_vhost_testpmd(
         self,
-        cbdma=False,
         no_pci=True,
         client_mode=False,
         enable_queues=1,
         nb_cores=2,
         rxq_txq=None,
         exchange_cbdma=False,
-        iova_mode="",
     ):
         """
         launch the testpmd with different parameters
         """
-        if cbdma is True:
-            dmas_info_list = self.dmas_info.split(",")
-            cbdma_arg_0_list = []
-            cbdma_arg_1_list = []
-            for item in dmas_info_list:
-                if dmas_info_list.index(item) < int(len(dmas_info_list) / 2):
-                    cbdma_arg_0_list.append(item)
-                else:
-                    cbdma_arg_1_list.append(item)
-            cbdma_arg_0 = ",dmas=[{}]".format(";".join(cbdma_arg_0_list))
-            cbdma_arg_1 = ",dmas=[{}]".format(";".join(cbdma_arg_1_list))
-        else:
-            cbdma_arg_0 = ""
-            cbdma_arg_1 = ""
         testcmd = self.app_testpmd_path + " "
         if not client_mode:
-            vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,queues=%d%s' " % (
+            vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,queues=%d' " % (
                 self.base_dir,
                 enable_queues,
-                cbdma_arg_0,
             )
-            vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,queues=%d%s' " % (
+            vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,queues=%d' " % (
                 self.base_dir,
                 enable_queues,
-                cbdma_arg_1,
             )
         else:
-            vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d%s' " % (
+            vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d' " % (
                 self.base_dir,
                 enable_queues,
-                cbdma_arg_0,
             )
-            vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d%s' " % (
+            vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d' " % (
                 self.base_dir,
                 enable_queues,
-                cbdma_arg_1,
             )
             if exchange_cbdma:
-                vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d%s' " % (
+                vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d' " % (
                     self.base_dir,
                     enable_queues,
-                    cbdma_arg_1,
                 )
-                vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d%s' " % (
+                vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d' " % (
                     self.base_dir,
                     enable_queues,
-                    cbdma_arg_0,
                 )

         eal_params = self.dut.create_eal_parameters(
@@ -228,13 +145,8 @@ class TestVM2VMVirtioNetPerf(TestCase):
                 rxq_txq,
                 rxq_txq,
             )
-        if iova_mode:
-            iova_parm = " --iova=" + iova_mode
-        else:
-            iova_parm = ""
-        self.command_line = testcmd + eal_params + vdev1 + vdev2 + iova_parm + params
+        self.command_line = testcmd + eal_params + vdev1 + vdev2 + params
         self.pmd_vhost.execute_cmd(self.command_line, timeout=30)
-        self.pmd_vhost.execute_cmd("vhost enable tx all", timeout=30)
         self.pmd_vhost.execute_cmd("start", timeout=30)

     def start_vms(self, server_mode=False, opt_queue=None, vm_config="vhost_sample"):
@@ -309,13 +221,11 @@ class TestVM2VMVirtioNetPerf(TestCase):
         """
         start vhost testpmd and qemu, and config the vm env
         """
         self.start_vhost_testpmd(
-            cbdma=cbdma,
             no_pci=no_pci,
             client_mode=client_mode,
             enable_queues=enable_queues,
             nb_cores=nb_cores,
             rxq_txq=rxq_txq,
-            iova_mode=iova_mode,
         )
         self.start_vms(server_mode=server_mode, opt_queue=opt_queue)
         self.config_vm_env(combined=combined, rxq_txq=rxq_txq)
@@ -461,11 +371,10 @@ class TestVM2VMVirtioNetPerf(TestCase):

     def test_vm2vm_split_ring_iperf_with_tso(self):
         """
-        TestCase1: VM2VM split ring vhost-user/virtio-net test with tcp traffic
+        Test Case 1: VM2VM split ring vhost-user/virtio-net test with tcp traffic
         """
         self.vm_args = "disable-modern=false,mrg_rxbuf=off,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on"
         self.prepare_test_env(
-            cbdma=False,
             no_pci=True,
             client_mode=False,
             enable_queues=1,
@@ -477,40 +386,12 @@ class TestVM2VMVirtioNetPerf(TestCase):
         )
         self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")

-    def test_vm2vm_split_ring_with_tso_and_cbdma_enable(self):
-        """
-        TestCase2: VM2VM split ring vhost-user/virtio-net CBDMA enable test with tcp traffic
-        """
-        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on"
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
-        self.prepare_test_env(
-            cbdma=True,
-            no_pci=False,
-            client_mode=False,
-            enable_queues=1,
-            nb_cores=2,
-            server_mode=False,
-            opt_queue=1,
-            combined=False,
-            rxq_txq=None,
-        )
-        cbdma_value = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-        expect_value = self.get_suite_cfg()["expected_throughput"][
-            "test_vm2vm_split_ring_iperf_with_tso"
-        ]
-        self.verify(
-            cbdma_value > expect_value,
-            "CBDMA enable performance: %s is lower than CBDMA disable: %s."
-            % (cbdma_value, expect_value),
-        )
-
     def test_vm2vm_split_ring_iperf_with_ufo(self):
         """
-        TestCase3: VM2VM split ring vhost-user/virtio-net test with udp traffic
+        Test Case 2: VM2VM split ring vhost-user/virtio-net test with udp traffic
         """
         self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
         self.prepare_test_env(
-            cbdma=False,
             no_pci=True,
             client_mode=False,
             enable_queues=1,
@@ -524,11 +405,10 @@ class TestVM2VMVirtioNetPerf(TestCase):

     def test_vm2vm_split_ring_device_capbility(self):
         """
-        TestCase4: Check split ring virtio-net device capability
+        Test Case 3: Check split ring virtio-net device capability
         """
         self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
         self.start_vhost_testpmd(
-            cbdma=False,
             no_pci=True,
             client_mode=False,
             enable_queues=1,
@@ -539,240 +419,13 @@ class TestVM2VMVirtioNetPerf(TestCase):
         self.offload_capbility_check(self.vm_dut[0])
         self.offload_capbility_check(self.vm_dut[1])

-    def test_vm2vm_split_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue(
-        self,
-    ):
-        """
-        TestCase5: VM2VM virtio-net split ring mergeable CBDMA enable test with large packet payload valid check
-        """
-        ipef_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
-
-        self.logger.info("Launch vhost with CBDMA and with 8 queue with VA mode")
-        self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
-        self.prepare_test_env(
-            cbdma=True,
-            no_pci=False,
-            client_mode=True,
-            enable_queues=8,
-            nb_cores=4,
-            server_mode=True,
-            opt_queue=8,
-            combined=True,
-            rxq_txq=8,
-            iova_mode="va",
-        )
-        self.check_scp_file_valid_between_vms()
-        iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(
-            iperf_mode="tso"
-        )
-        ipef_result.append(
-            [
-                "Enable",
-                "mergeable path with VA mode",
-                8,
-                iperf_data_cbdma_enable_8_queue,
-            ]
-        )
-
-        self.logger.info("Re-launch and exchange CBDMA and with 8 queue with VA mode")
-        self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(
-            cbdma=True,
-            no_pci=False,
-            client_mode=True,
-            enable_queues=8,
-            nb_cores=4,
-            rxq_txq=8,
-            exchange_cbdma=True,
-            iova_mode="va",
-        )
-        self.check_scp_file_valid_between_vms()
-        iperf_data_cbdma_enable_8_queue_exchange = (
-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-        )
-        ipef_result.append(
-            [
-                "Disable",
-                "mergeable path exchange CBDMA with VA mode",
-                8,
-                iperf_data_cbdma_enable_8_queue_exchange,
-            ]
-        )
-
-        # This test step need to test on 1G guest hugepage ENV.
-        if not self.check_2m_env:
-            self.logger.info(
-                "Re-launch and exchange CBDMA and with 8 queue with PA mode"
-            )
-            self.vhost.send_expect("quit", "# ", 30)
-            self.start_vhost_testpmd(
-                cbdma=True,
-                no_pci=False,
-                client_mode=True,
-                enable_queues=8,
-                nb_cores=4,
-                rxq_txq=8,
-                exchange_cbdma=True,
-                iova_mode="pa",
-            )
-            self.check_scp_file_valid_between_vms()
-            iperf_data_cbdma_enable_8_queue_exchange_pa = (
-                self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-            )
-            ipef_result.append(
-                [
-                    "Disable",
-                    "mergeable path exchange CBDMA with PA mode",
-                    8,
-                    iperf_data_cbdma_enable_8_queue_exchange_pa,
-                ]
-            )
-
-        self.logger.info("Re-launch without CBDMA and with 4 queue")
-        self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(
-            cbdma=False,
-            no_pci=False,
-            client_mode=True,
-            enable_queues=4,
-            nb_cores=4,
-            rxq_txq=4,
-        )
-        self.config_vm_env(combined=True, rxq_txq=4)
-        self.check_scp_file_valid_between_vms()
-        iperf_data_cbdma_disable_4_queue = (
-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-        )
-        ipef_result.append(
-            [
-                "Disable",
-                "mergeable path without CBDMA with 4 queue",
-                4,
-                iperf_data_cbdma_disable_4_queue,
-            ]
-        )
-
-        self.logger.info("Re-launch without CBDMA and with 1 queue")
-        self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(
-            cbdma=False,
-            no_pci=False,
-            client_mode=True,
-            enable_queues=4,
-            nb_cores=4,
-            rxq_txq=1,
-        )
-        self.config_vm_env(combined=True, rxq_txq=1)
-        self.check_scp_file_valid_between_vms()
-        iperf_data_cbdma_disable_1_queue = (
-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-        )
-        ipef_result.append(
-            [
-                "Disable",
-                "mergeable path without CBDMA with 1 queue",
-                1,
-                iperf_data_cbdma_disable_1_queue,
-            ]
-        )
-
-        self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"]
-        self.result_table_create(self.table_header)
-        for table_row in ipef_result:
-            self.result_table_add(table_row)
-        self.result_table_print()
-        self.verify(
-            iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_4_queue,
-            "CMDMA enable: %s is lower than CBDMA disable: %s"
-            % (iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_4_queue),
-        )
-
-    def test_vm2vm_split_ring_with_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(
-        self,
-    ):
-        """
-        TestCase6: VM2VM virtio-net split ring non-mergeable CBDMA enable test with large packet payload valid check
-        """
-        ipef_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
-
-        self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
-        self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
-        self.prepare_test_env(
-            cbdma=True,
-            no_pci=False,
-            client_mode=True,
-            enable_queues=8,
-            nb_cores=4,
-            server_mode=True,
-            opt_queue=8,
-            combined=True,
-            rxq_txq=8,
-        )
-        self.check_scp_file_valid_between_vms()
-        iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(
-            iperf_mode="tso"
-        )
-        ipef_result.append(
-            ["Enable", "no-mergeable path", 8, iperf_data_cbdma_enable_8_queue]
-        )
-
-        self.logger.info("Re-launch without CBDMA and used 8 queue")
-        self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(
-            cbdma=False,
-            no_pci=False,
-            client_mode=True,
-            enable_queues=8,
-            nb_cores=4,
-            rxq_txq=8,
-        )
-        self.check_scp_file_valid_between_vms()
-        iperf_data_cbdma_disable_8_queue = (
-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-        )
-        ipef_result.append(
-            ["Disable", "no-mergeable path", 8, iperf_data_cbdma_disable_8_queue]
-        )
-
-        self.logger.info("Re-launch without CBDMA and used 1 queue")
-        self.vhost.send_expect("quit", "# ", 30)
-        self.start_vhost_testpmd(
-            cbdma=False,
-            no_pci=False,
-            client_mode=True,
-            enable_queues=8,
-            nb_cores=4,
-            rxq_txq=1,
-        )
-        self.config_vm_env(combined=True, rxq_txq=1)
-        self.check_scp_file_valid_between_vms()
-        iperf_data_cbdma_disable_1_queue = (
-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-        )
-        ipef_result.append(
-            ["Disable", "no-mergeable path", 1, iperf_data_cbdma_disable_1_queue]
-        )
-
-        self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"]
-        self.result_table_create(self.table_header)
-        for table_row in ipef_result:
-            self.result_table_add(table_row)
-        self.result_table_print()
-        self.verify(
-            iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_8_queue,
-            "CMDMA enable: %s is lower than CBDMA disable: %s"
-            % (iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue),
-        )
-
     def test_vm2vm_packed_ring_iperf_with_tso(self):
         """
-        TestCase7: VM2VM packed ring vhost-user/virtio-net test with tcp traffic
+        Test Case 4: VM2VM packed ring vhost-user/virtio-net test with tcp traffic
         """
         self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
         self.prepare_test_env(
-            cbdma=False,
             no_pci=True,
             client_mode=False,
             enable_queues=1,
@@ -784,32 +437,12 @@ class TestVM2VMVirtioNetPerf(TestCase):
         )
         self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")

-    def test_vm2vm_packed_ring_iperf_with_tso_and_cbdma_enable(self):
-        """
-        TestCase8: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic
-        """
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
-        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
-        self.prepare_test_env(
-            cbdma=True,
-            no_pci=False,
-            client_mode=False,
-            enable_queues=1,
-            nb_cores=2,
-            server_mode=False,
-            opt_queue=None,
-            combined=False,
-            rxq_txq=None,
-        )
-        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-
     def test_vm2vm_packed_ring_iperf_with_ufo(self):
         """
-        Test Case 9: VM2VM packed ring vhost-user/virtio-net test with udp trafficc
+        Test Case 5: VM2VM packed ring vhost-user/virtio-net test with udp traffic
         """
         self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
         self.prepare_test_env(
-            cbdma=False,
             no_pci=True,
             client_mode=False,
             enable_queues=1,
@@ -823,11 +456,10 @@ class TestVM2VMVirtioNetPerf(TestCase):

     def test_vm2vm_packed_ring_device_capbility(self):
         """
-        Test Case 10: Check packed ring virtio-net device capability
+        Test Case 6: Check packed ring virtio-net device capability
         """
         self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
         self.start_vhost_testpmd(
-            cbdma=False,
             no_pci=True,
             client_mode=False,
             enable_queues=1,
@@ -838,146 +470,6 @@ class TestVM2VMVirtioNetPerf(TestCase):
         )
         self.offload_capbility_check(self.vm_dut[0])
         self.offload_capbility_check(self.vm_dut[1])

-    def test_vm2vm_packed_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue(
-        self,
-    ):
-        """
-        Test Case 11: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable test with large packet payload valid check
-        """
-        ipef_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
-
-        self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
-        self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
-        self.prepare_test_env(
-            cbdma=True,
-            no_pci=False,
-            client_mode=False,
-            enable_queues=8,
-            nb_cores=4,
-            server_mode=False,
-            opt_queue=8,
-            combined=True,
-            rxq_txq=8,
-        )
-        for i in range(0, 5):
-            self.check_scp_file_valid_between_vms()
-            iperf_data_cbdma_enable_8_queue = (
-                self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-            )
-            ipef_result.append(
-                ["Enable_%d" % i, "mergeable path", 8, iperf_data_cbdma_enable_8_queue]
-            )
-        self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"]
-        self.result_table_create(self.table_header)
-        for table_row in ipef_result:
-            self.result_table_add(table_row)
-        self.result_table_print()
-
-    def test_vm2vm_packed_ring_with_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(
-        self,
-    ):
-        """
-        Test Case 12: VM2VM virtio-net packed ring non-mergeable 8 queues CBDMA enable test with large packet payload valid check
-        """
-        ipef_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
-
-        self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
-        self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
-        self.prepare_test_env(
-            cbdma=True,
-            no_pci=False,
-            client_mode=False,
-            enable_queues=8,
-            nb_cores=4,
-            server_mode=False,
-            opt_queue=8,
-            combined=True,
-            rxq_txq=8,
-        )
-        for i in range(0, 5):
-            self.check_scp_file_valid_between_vms()
-            iperf_data_cbdma_enable_8_queue = (
-                self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-            )
-            ipef_result.append(
-                ["Enable", "mergeable path", 8, iperf_data_cbdma_enable_8_queue]
-            )
-        self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"]
-        self.result_table_create(self.table_header)
-        for table_row in ipef_result:
-            self.result_table_add(table_row)
-        self.result_table_print()
-
-    def test_vm2vm_packed_ring_with_tso_and_cbdma_enable_iova_pa(self):
-        """
-        Test Case 13: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic when set iova=pa
-        """
-        # This test case need to test on 1G guest hugepage ENV.
-        self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
-        self.prepare_test_env(
-            cbdma=True,
-            no_pci=False,
-            client_mode=False,
-            enable_queues=1,
-            nb_cores=2,
-            server_mode=False,
-            opt_queue=1,
-            combined=False,
-            rxq_txq=None,
-            iova_mode="pa",
-        )
-        self.check_scp_file_valid_between_vms()
-        cbdma_value = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-        expect_value = self.get_suite_cfg()["expected_throughput"][
-            "test_vm2vm_split_ring_iperf_with_tso"
-        ]
-        self.verify(
-            cbdma_value > expect_value,
-            "CBDMA enable performance: %s is lower than CBDMA disable: %s."
-            % (cbdma_value, expect_value),
-        )
-
-    def test_vm2vm_packed_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue_iova_pa(
-        self,
-    ):
-        """
-        Test Case 14: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable and PA mode test with large packet payload valid check
-        """
-        # This test case need to test on 1G guest hugepage ENV.
-        ipef_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
-
-        self.logger.info("Launch vhost-testpmd with CBDMA and used 8 queue")
-        self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
-        self.prepare_test_env(
-            cbdma=True,
-            no_pci=False,
-            client_mode=False,
-            enable_queues=8,
-            nb_cores=4,
-            server_mode=False,
-            opt_queue=8,
-            combined=True,
-            rxq_txq=8,
-            iova_mode="pa",
-        )
-        for i in range(0, 5):
-            self.check_scp_file_valid_between_vms()
-            iperf_data_cbdma_enable_8_queue = (
-                self.start_iperf_and_verify_vhost_xstats_info(iperf_mode="tso")
-            )
-            ipef_result.append(
-                ["Enable_%d" % i, "mergeable path", 8, iperf_data_cbdma_enable_8_queue]
-            )
-        self.table_header = ["CBDMA Enable/Disable", "Mode", "rxq/txq", "Gbits/sec"]
-        self.result_table_create(self.table_header)
-        for table_row in ipef_result:
-            self.result_table_add(table_row)
-        self.result_table_print()
-
     def tear_down(self):
         """
         run after each test case.
-- 
2.25.1