From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D481BA04FD; Fri, 29 Jul 2022 10:19:01 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id CF19942C0B; Fri, 29 Jul 2022 10:19:01 +0200 (CEST) Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by mails.dpdk.org (Postfix) with ESMTP id BF80A40151 for ; Fri, 29 Jul 2022 10:18:59 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1659082739; x=1690618739; h=from:to:cc:subject:date:message-id:mime-version: content-transfer-encoding; bh=UCY+qMMXV0kKI0Kzi3jvPygtn1W8HesfwEZU8EPf32s=; b=ZLom2q4uyCnN2dgHc1TU/zhq7qvZkTruzJPjCuE5koG6TXAUvyCszYpg /lYu/kcn6bbnWlfcNrcEgK3AlO6gwj1alA/JWGAEWxlfLx4RJlJ+aVTy9 sm0v5y8qHM1a55V84JITfvTrjbv6CPjjT80amf1QMy2fVh/YuBI5qwtvM 9D1RIPjzqseImFyjj8Fg7u0b1Wx7Jkfy5MZnRMMljI9z2kRtXwhq6o07d g9s+OHMwgk/+RCw7BwoJJPsic9pBHLG+pzyXun3N3RlJstr30VnIPPbMt LrHoeixsIiF8v5gIytjPG5Kd2GmzlF+3QEMsOWeiS5QWQklj0pKXc5TK8 A==; X-IronPort-AV: E=McAfee;i="6400,9594,10422"; a="288734647" X-IronPort-AV: E=Sophos;i="5.93,200,1654585200"; d="scan'208,223";a="288734647" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 29 Jul 2022 01:18:58 -0700 X-IronPort-AV: E=Sophos;i="5.93,200,1654585200"; d="scan'208,223";a="598187671" Received: from unknown (HELO localhost.localdomain) ([10.239.252.222]) by orsmga007-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 29 Jul 2022 01:18:56 -0700 From: Wei Ling To: dts@dpdk.org Cc: Wei Ling Subject: [dts][PATCH V1 2/2] tests/vm2vm_virtio_net_perf_cbdma: modify testsuite to test virtio dequeue Date: Fri, 29 Jul 2022 04:14:00 -0400 Message-Id: <20220729081400.1009107-1-weix.ling@intel.com> X-Mailer: git-send-email 2.25.1 MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org >From DPDK-22.07, virtio supports async dequeue for the split and packed ring paths, so modify the vm2vm_virtio_net_perf_cbdma testsuite to test the split and packed ring async dequeue feature. Signed-off-by: Wei Ling --- .../TestSuite_vm2vm_virtio_net_perf_cbdma.py | 1007 +++++++++++------ 1 file changed, 663 insertions(+), 344 deletions(-) diff --git a/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py b/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py index 8dad7be5..723b25d2 100644 --- a/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py +++ b/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py @@ -1,6 +1,31 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2022 Intel Corporation + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution.
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. # +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ DPDK Test suite. @@ -87,43 +112,18 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): 60, ) - @staticmethod - def generate_dms_param(queues): - das_list = [] - for i in range(queues): - das_list.append("txq{}".format(i)) - das_param = "[{}]".format(";".join(das_list)) - return das_param - - @staticmethod - def generate_lcore_dma_param(cbdma_list, core_list): - group_num = int(len(cbdma_list) / len(core_list)) - lcore_dma_list = [] - if len(cbdma_list) == 1: - for core in core_list: - lcore_dma_list.append("lcore{}@{}".format(core, cbdma_list[0])) - elif len(core_list) == 1: - for cbdma in cbdma_list: - lcore_dma_list.append("lcore{}@{}".format(core_list[0], cbdma)) - else: - for cbdma in cbdma_list: - core_list_index = int(cbdma_list.index(cbdma) / group_num) - lcore_dma_list.append( - "lcore{}@{}".format(core_list[core_list_index], cbdma) - ) - lcore_dma_param = "[{}]".format(",".join(lcore_dma_list)) - return lcore_dma_param - def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( - "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 - ) - self.dut.send_expect( - "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, - "# ", - 60, - ) + if self.cbdma_str: + self.dut.send_expect("modprobe ioatdma", "# ") + self.dut.send_expect( + "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 + ) + self.dut.send_expect( + "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" + % self.cbdma_str, + "# ", + 60, + ) @property def check_2M_env(self): @@ -257,8 +257,8 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): out_tx = self.vhost.send_expect("show port xstats 0", "testpmd> ", 20) out_rx = self.vhost.send_expect("show port xstats 1", "testpmd> ", 20) - tx_info = re.search("tx_size_1523_to_max_packets:\s*(\d*)", out_tx) - rx_info = re.search("rx_size_1523_to_max_packets:\s*(\d*)", out_rx) + tx_info = re.search("tx_q0_size_1519_max_packets:\s*(\d*)", out_tx) + rx_info = re.search("rx_q0_size_1519_max_packets:\s*(\d*)", out_rx) self.verify( int(rx_info.group(1)) > 0, "Port 1 not receive packet greater than 1522" @@ -307,11 +307,11 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.vm_dut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ") # scp this file to vm1 out = self.vm_dut[1].send_command( - "scp root@%s:/tmp/payload /root" % self.virtio_ip1, timeout=5 + "scp root@%s:/tmp/payload /root" % self.virtio_ip1, timeout=10 ) if "Are you sure you want to continue connecting" in out: - self.vm_dut[1].send_command("yes", timeout=3) - 
self.vm_dut[1].send_command(self.vm[0].password, timeout=3) + self.vm_dut[1].send_command("yes", timeout=10) + self.vm_dut[1].send_command(self.vm[0].password, timeout=10) # get the file info in vm1, and check it valid md5_send = self.vm_dut[0].send_expect("md5sum /tmp/payload", "# ") md5_revd = self.vm_dut[1].send_expect("md5sum /root/payload", "# ") @@ -321,23 +321,24 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): md5_send == md5_revd, "the received file is different with send file" ) - def test_vm2vm_split_ring_iperf_with_tso_and_cbdma_enable(self): + def test_vm2vm_virtio_net_split_ring_cbdma_enable_test_with_tcp_traffic(self): """ - Test Case 1: VM2VM split ring vhost-user/virtio-net CBDMA enable test with tcp traffic + Test Case 1: VM2VM virtio-net split ring CBDMA enable test with tcp traffic """ - self.get_cbdma_ports_info_and_bind_to_dpdk(2) - dmas = self.generate_dms_param(1) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:3] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2) + lcore_dma = "lcore%s@%s," "lcore%s@%s" % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[2], + self.cbdma_list[1], ) - eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=1,dmas={},dma_ring_size=2048'".format( - dmas + eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=1,tso=1,dmas=[txq0;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=1,tso=1,dmas=[txq0;rxq0]'" ) param = ( " --nb-cores=2 --txd=1024 --rxd=1024 --txq=1 --rxq=1" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -354,53 +355,72 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.get_perf_result() self.verify_xstats_info_on_vhost() - def test_vm2vm_split_ring_with_mergeable_path_8queue_check_large_packet_and_cbdma_enable( + def test_vm2vm_virtio_net_split_ring_mergeable_8_queues_cbdma_enable_test_with_large_packet_payload_valid_check( self, ): """ - Test Case 2: VM2VM split ring vhost-user/virtio-net mergeable 8 queues CBDMA enable test with large packet payload valid check + Test Case 2: VM2VM virtio-net split ring mergeable 8 queues CBDMA enable test with large packet payload valid check """ self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) - dmas = self.generate_dms_param(8) - core1 = self.vhost_core_list[1] - core2 = self.vhost_core_list[2] - core3 = self.vhost_core_list[3] - core4 = self.vhost_core_list[4] - cbdma1 = self.cbdma_list[0] - cbdma2 = self.cbdma_list[1] - cbdma3 = self.cbdma_list[2] - cbdma4 = self.cbdma_list[3] - cbdma5 = self.cbdma_list[4] - cbdma6 = self.cbdma_list[5] - cbdma7 = self.cbdma_list[6] - cbdma8 = self.cbdma_list[7] - cbdma9 = self.cbdma_list[8] - cbdma10 = self.cbdma_list[9] - cbdma11 = self.cbdma_list[10] - cbdma12 = self.cbdma_list[11] - cbdma13 = self.cbdma_list[12] - cbdma14 = self.cbdma_list[13] - cbdma15 = self.cbdma_list[14] - cbdma16 = self.cbdma_list[15] lcore_dma = ( - f"[lcore{core1}@{cbdma1},lcore{core1}@{cbdma2},lcore{core1}@{cbdma3}," - f"lcore{core1}@{cbdma4},lcore{core1}@{cbdma5},lcore{core1}@{cbdma6}," - f"lcore{core2}@{cbdma7},lcore{core2}@{cbdma8}," - f"lcore{core3}@{cbdma9},lcore{core3}@{cbdma10},lcore{core3}@{cbdma11},lcore{core3}@{cbdma12}," - f"lcore{core3}@{cbdma13},lcore{core3}@{cbdma14},lcore{core3}@{cbdma15}," - f"lcore{core4}@{cbdma16}]" + "lcore%s@%s," +
"lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[2], + self.cbdma_list[6], + self.vhost_core_list[2], + self.cbdma_list[7], + self.vhost_core_list[3], + self.cbdma_list[8], + self.vhost_core_list[3], + self.cbdma_list[9], + self.vhost_core_list[3], + self.cbdma_list[10], + self.vhost_core_list[3], + self.cbdma_list[11], + self.vhost_core_list[3], + self.cbdma_list[12], + self.vhost_core_list[3], + self.cbdma_list[13], + self.vhost_core_list[3], + self.cbdma_list[14], + self.vhost_core_list[4], + self.cbdma_list[15], + ) ) eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas={}'".format( - dmas - ) - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas={}'".format( - dmas - ) + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'" ) param = ( " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -418,25 +438,78 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - self.logger.info("Quit and relaunch vhost w/ diff CBDMA channels") self.pmdout_vhost_user.execute_cmd("quit", "#") lcore_dma = ( - f"[lcore{core1}@{cbdma1},lcore{core1}@{cbdma2}," - f"lcore{core1}@{cbdma3},lcore{core1}@{cbdma4}," - f"lcore{core2}@{cbdma1},lcore{core2}@{cbdma3},lcore{core2}@{cbdma5}," - f"lcore{core2}@{cbdma6},lcore{core2}@{cbdma7},lcore{core2}@{cbdma8}," - f"lcore{core3}@{cbdma2},lcore{core3}@{cbdma4},lcore{core3}@{cbdma9}," - f"lcore{core3}@{cbdma10},lcore{core3}@{cbdma11},lcore{core3}@{cbdma12}," - f"lcore{core3}@{cbdma13},lcore{core3}@{cbdma14},lcore{core3}@{cbdma15}," - f"lcore{core4}@{cbdma16}]" + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[2], + self.cbdma_list[0], + self.vhost_core_list[2], + self.cbdma_list[2], + self.vhost_core_list[2], + self.cbdma_list[4], + self.vhost_core_list[2], + self.cbdma_list[5], + self.vhost_core_list[2], + self.cbdma_list[6], + self.vhost_core_list[2], + self.cbdma_list[7], + self.vhost_core_list[3], + self.cbdma_list[1], + self.vhost_core_list[3], + self.cbdma_list[3], + self.vhost_core_list[3], + self.cbdma_list[8], + self.vhost_core_list[3], + self.cbdma_list[9], + self.vhost_core_list[3], + self.cbdma_list[10], + 
self.vhost_core_list[3], + self.cbdma_list[11], + self.vhost_core_list[3], + self.cbdma_list[12], + self.vhost_core_list[3], + self.cbdma_list[13], + self.vhost_core_list[3], + self.cbdma_list[14], + self.vhost_core_list[4], + self.cbdma_list[15], + ) ) eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6]'" - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq1;txq2;txq3;txq4;txq5;txq6;txq7]'" + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6]'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,tso=1,dmas=[txq1;txq2;txq3;txq4;txq5;txq6;txq7]'" ) param = ( " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -451,14 +524,13 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.get_perf_result() if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost w/ iova=pa") eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6]'" - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6]'" + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6]'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6]'" ) param = ( " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.pmdout_vhost_user.execute_cmd("quit", "#") self.start_vhost_testpmd( @@ -473,11 +545,10 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - self.logger.info("Quit and relaunch vhost w/o CBDMA channels") self.pmdout_vhost_user.execute_cmd("quit", "#") eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=4'" - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=4'" + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=4,tso=1'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=4,tso=1'" ) param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=4 --rxq=4" self.start_vhost_testpmd( @@ -492,11 +563,10 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - self.logger.info("Quit and relaunch vhost w/o CBDMA channels with 1 queue") self.pmdout_vhost_user.execute_cmd("quit", "#") eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=4'" - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=4'" + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=4,tso=1'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=4,tso=1'" ) param = " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=1 --txq=1" self.start_vhost_testpmd( @@ -510,54 +580,73 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - def test_vm2vm_split_ring_with_non_mergeable_path_8queue_check_large_packet_and_cbdma_enable( + def test_vm2vm_virtio_net_split_ring_with_non_mergeable_8_queues_cbdma_enable_test_with_large_packet_payload_valid_check( self, ): """ - Test Case 3: VM2VM split ring vhost-user/virtio-net non-mergeable 8 queues CBDMA enable test with large packet payload valid check + Test Case 3: VM2VM virtio-net split ring non-mergeable 8 queues CBDMA enable test with large packet payload valid check """ self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, 
allow_diff_socket=True) - dmas = self.generate_dms_param(8) - core1 = self.vhost_core_list[1] - core2 = self.vhost_core_list[2] - core3 = self.vhost_core_list[3] - core4 = self.vhost_core_list[4] - cbdma1 = self.cbdma_list[0] - cbdma2 = self.cbdma_list[1] - cbdma3 = self.cbdma_list[2] - cbdma4 = self.cbdma_list[3] - cbdma5 = self.cbdma_list[4] - cbdma6 = self.cbdma_list[5] - cbdma7 = self.cbdma_list[6] - cbdma8 = self.cbdma_list[7] - cbdma9 = self.cbdma_list[8] - cbdma10 = self.cbdma_list[9] - cbdma11 = self.cbdma_list[10] - cbdma12 = self.cbdma_list[11] - cbdma13 = self.cbdma_list[12] - cbdma14 = self.cbdma_list[13] - cbdma15 = self.cbdma_list[14] - cbdma16 = self.cbdma_list[15] lcore_dma = ( - f"[lcore{core1}@{cbdma1},lcore{core1}@{cbdma2},lcore{core1}@{cbdma3}," - f"lcore{core1}@{cbdma4},lcore{core1}@{cbdma5},lcore{core1}@{cbdma6}," - f"lcore{core2}@{cbdma7},lcore{core2}@{cbdma8}," - f"lcore{core3}@{cbdma9},lcore{core3}@{cbdma10},lcore{core3}@{cbdma11},lcore{core3}@{cbdma12}," - f"lcore{core3}@{cbdma13},lcore{core3}@{cbdma14},lcore{core3}@{cbdma15}," - f"lcore{core4}@{cbdma16}]" + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[2], + self.cbdma_list[6], + self.vhost_core_list[2], + self.cbdma_list[7], + self.vhost_core_list[3], + self.cbdma_list[8], + self.vhost_core_list[3], + self.cbdma_list[9], + self.vhost_core_list[3], + self.cbdma_list[10], + self.vhost_core_list[3], + self.cbdma_list[11], + self.vhost_core_list[3], + self.cbdma_list[12], + self.vhost_core_list[3], + self.cbdma_list[13], + self.vhost_core_list[3], + self.cbdma_list[14], + self.vhost_core_list[4], + self.cbdma_list[15], + ) ) eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas={}'".format( - dmas - ) - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas={}'".format( - dmas - ) + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'" ) param = ( " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -575,25 +664,78 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - self.logger.info("Quit and relaunch vhost w/ diff CBDMA channels") self.pmdout_vhost_user.execute_cmd("quit", "#") lcore_dma = ( - f"[lcore{core1}@{cbdma1},lcore{core1}@{cbdma2}," - f"lcore{core1}@{cbdma3},lcore{core1}@{cbdma4}," - f"lcore{core2}@{cbdma1},lcore{core2}@{cbdma3},lcore{core2}@{cbdma5}," - f"lcore{core2}@{cbdma6},lcore{core2}@{cbdma7},lcore{core2}@{cbdma8}," - f"lcore{core3}@{cbdma2},lcore{core3}@{cbdma4},lcore{core3}@{cbdma9}," - f"lcore{core3}@{cbdma10},lcore{core3}@{cbdma11},lcore{core3}@{cbdma12}," - 
f"lcore{core3}@{cbdma13},lcore{core3}@{cbdma14},lcore{core3}@{cbdma15}," - f"lcore{core4}@{cbdma16}]" + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[2], + self.cbdma_list[0], + self.vhost_core_list[2], + self.cbdma_list[2], + self.vhost_core_list[2], + self.cbdma_list[4], + self.vhost_core_list[2], + self.cbdma_list[5], + self.vhost_core_list[2], + self.cbdma_list[6], + self.vhost_core_list[2], + self.cbdma_list[7], + self.vhost_core_list[3], + self.cbdma_list[1], + self.vhost_core_list[3], + self.cbdma_list[3], + self.vhost_core_list[3], + self.cbdma_list[8], + self.vhost_core_list[3], + self.cbdma_list[9], + self.vhost_core_list[3], + self.cbdma_list[10], + self.vhost_core_list[3], + self.cbdma_list[11], + self.vhost_core_list[3], + self.cbdma_list[12], + self.vhost_core_list[3], + self.cbdma_list[13], + self.vhost_core_list[3], + self.cbdma_list[14], + self.vhost_core_list[4], + self.cbdma_list[15], + ) ) eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6]'" - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq1;txq2;txq3;txq4;txq5;txq6]'" + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6]'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,tso=1,dmas=[txq1;txq2;txq3;txq4;txq5;txq6]'" ) param = ( " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -607,11 +749,10 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - self.logger.info("Quit and relaunch vhost w/o CBDMA channels") self.pmdout_vhost_user.execute_cmd("quit", "#") eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8'" - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8'" + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,tso=1'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,tso=1'" ) param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" self.start_vhost_testpmd( @@ -627,11 +768,10 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - self.logger.info("Quit and relaunch vhost w/o CBDMA channels with 1 queue") self.pmdout_vhost_user.execute_cmd("quit", "#") eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8'" - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8'" + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,tso=1'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,tso=1'" ) param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=1 --rxq=1" self.start_vhost_testpmd( @@ -646,28 +786,73 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - def test_vm2vm_split_ring_with_mergeable_path_16queue_check_large_packet_and_cbdma_enable( + def test_vm2vm_virtio_net_split_ring_mergeable_16_queues_cbdma_enable_test_with_large_packet_payload_valid_check( self, ): """ - Test 
Case 4: VM2VM split ring vhost-user/virtio-net mergeable 16 queues CBDMA enable test with large packet payload valid check + Test Case 4: VM2VM virtio-net split ring mergeable 16 queues CBDMA enable test with large packet payload valid check """ self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) - dmas = self.generate_dms_param(16) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:9] + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[2], + self.cbdma_list[2], + self.vhost_core_list[2], + self.cbdma_list[3], + self.vhost_core_list[3], + self.cbdma_list[4], + self.vhost_core_list[4], + self.cbdma_list[5], + self.vhost_core_list[4], + self.cbdma_list[6], + self.vhost_core_list[4], + self.cbdma_list[7], + self.vhost_core_list[5], + self.cbdma_list[8], + self.vhost_core_list[5], + self.cbdma_list[9], + self.vhost_core_list[6], + self.cbdma_list[10], + self.vhost_core_list[6], + self.cbdma_list[11], + self.vhost_core_list[7], + self.cbdma_list[12], + self.vhost_core_list[7], + self.cbdma_list[13], + self.vhost_core_list[8], + self.cbdma_list[14], + self.vhost_core_list[8], + self.cbdma_list[15], + ) ) eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=16,dmas={}'".format( - dmas - ) - + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=16,dmas={}'".format( - dmas - ) + "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=16,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7;rxq8;rxq9;rxq10;rxq11;rxq12;rxq13;rxq14;rxq15]'" + + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=16,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7;rxq8;rxq9;rxq10;rxq11;rxq12;rxq13;rxq14;rxq15]'" ) + param = ( " --nb-cores=8 --txd=1024 --rxd=1024 --txq=16 --rxq=16" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -685,21 +870,24 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - def test_vm2vm_packed_ring_iperf_with_tso_and_cbdma_enable(self): + def test_vm2vm_virtio_net_packed_ring_cbdma_enable_test_with_tcp_traffic(self): """ - Test Case 5: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic + Test Case 5: VM2VM virtio-net packed ring CBDMA enable test with tcp traffic """ - self.get_cbdma_ports_info_and_bind_to_dpdk(2) - dmas = self.generate_dms_param(1) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:3] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2) + lcore_dma = "lcore%s@%s," "lcore%s@%s" % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[2], + self.cbdma_list[1], + ) + eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=1,tso=1,dmas=[txq0;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=1,tso=1,dmas=[txq0;rxq0]'" ) - eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=1,dmas={}'".format( - dmas - ) + " --vdev 
'net_vhost1,iface=vhost-net1,queues=1,dmas={}'".format(dmas) param = ( " --nb-cores=2 --txd=1024 --rxd=1024 --txq=1 --rxq=1" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -716,46 +904,72 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.get_perf_result() self.verify_xstats_info_on_vhost() - def test_vm2vm_packed_ring_with_mergeable_path_8queue_check_large_packet_and_cbdma_enable( + def test_vm2vm_virtio_net_packed_ring_mergeable_8_queues_cbdma_enable_test_with_large_packet_payload_valid_check( self, ): """ Test Case 6: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable test with large packet payload valid check """ - self.get_cbdma_ports_info_and_bind_to_dpdk(16, allow_diff_socket=True) - dmas = self.generate_dms_param(7) - core1 = self.vhost_core_list[1] - core2 = self.vhost_core_list[2] - core3 = self.vhost_core_list[3] - core4 = self.vhost_core_list[4] - cbdma1 = self.cbdma_list[0] - cbdma2 = self.cbdma_list[1] - cbdma3 = self.cbdma_list[2] - cbdma4 = self.cbdma_list[3] - cbdma5 = self.cbdma_list[4] - cbdma6 = self.cbdma_list[5] - cbdma7 = self.cbdma_list[6] - cbdma8 = self.cbdma_list[7] - cbdma9 = self.cbdma_list[8] - cbdma10 = self.cbdma_list[9] - cbdma11 = self.cbdma_list[10] - cbdma12 = self.cbdma_list[11] - cbdma13 = self.cbdma_list[12] - cbdma14 = self.cbdma_list[13] - cbdma15 = self.cbdma_list[14] - cbdma16 = self.cbdma_list[15] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) lcore_dma = ( - f"[lcore{core1}@{cbdma1},lcore{core1}@{cbdma2},lcore{core1}@{cbdma3},lcore{core1}@{cbdma4}," - f"lcore{core2}@{cbdma1},lcore{core2}@{cbdma3},lcore{core2}@{cbdma5},lcore{core2}@{cbdma6},lcore{core2}@{cbdma7},lcore{core2}@{cbdma8}," - f"lcore{core3}@{cbdma2},lcore{core3}@{cbdma4},lcore{core3}@{cbdma9},lcore{core3}@{cbdma10},lcore{core3}@{cbdma11},lcore{core3}@{cbdma12},lcore{core3}@{cbdma13},lcore{core3}@{cbdma14},lcore{core3}@{cbdma15}," - f"lcore{core4}@{cbdma16}]" - ) - eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=8,dmas={}'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=8,dmas={}'".format(dmas) + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[2], + self.cbdma_list[6], + self.vhost_core_list[2], + self.cbdma_list[7], + self.vhost_core_list[3], + self.cbdma_list[8], + self.vhost_core_list[3], + self.cbdma_list[9], + self.vhost_core_list[3], + self.cbdma_list[10], + self.vhost_core_list[3], + self.cbdma_list[11], + self.vhost_core_list[3], + self.cbdma_list[12], + self.vhost_core_list[3], + self.cbdma_list[13], + self.vhost_core_list[3], + self.cbdma_list[14], + self.vhost_core_list[4], + self.cbdma_list[15], + ) + ) + eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'" + + " --vdev 
'net_vhost1,iface=vhost-net1,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'" + ) param = ( " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -764,7 +978,6 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): param=param, iova_mode="va", ) - # self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on" self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on" self.start_vms(server_mode=False, vm_queue=8) self.config_vm_ip() @@ -775,48 +988,72 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - def test_vm2vm_packed_ring_with_non_mergeable_path_8queue_check_large_packet_and_cbdma_enable( + def test_vm2vm_virtio_net_packed_ring_non_mergeable_8_queues_cbdma_enable_test_with_large_packet_payload_valid_check( self, ): """ Test Case 7: VM2VM virtio-net packed ring non-mergeable 8 queues CBDMA enable test with large packet payload valid check """ - self.get_cbdma_ports_info_and_bind_to_dpdk(16, allow_diff_socket=True) - dmas = self.generate_dms_param(8) - core1 = self.vhost_core_list[1] - core2 = self.vhost_core_list[2] - core3 = self.vhost_core_list[3] - core4 = self.vhost_core_list[4] - cbdma1 = self.cbdma_list[0] - cbdma2 = self.cbdma_list[1] - cbdma3 = self.cbdma_list[2] - cbdma4 = self.cbdma_list[3] - cbdma5 = self.cbdma_list[4] - cbdma6 = self.cbdma_list[5] - cbdma7 = self.cbdma_list[6] - cbdma8 = self.cbdma_list[7] - cbdma9 = self.cbdma_list[8] - cbdma10 = self.cbdma_list[9] - cbdma11 = self.cbdma_list[10] - cbdma12 = self.cbdma_list[11] - cbdma13 = self.cbdma_list[12] - cbdma14 = self.cbdma_list[13] - cbdma15 = self.cbdma_list[14] - cbdma16 = self.cbdma_list[15] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) lcore_dma = ( - f"[lcore{core1}@{cbdma1},lcore{core1}@{cbdma2},lcore{core1}@{cbdma3}," - f"lcore{core1}@{cbdma4},lcore{core1}@{cbdma5},lcore{core1}@{cbdma6}," - f"lcore{core2}@{cbdma7},lcore{core2}@{cbdma8}," - f"lcore{core3}@{cbdma9},lcore{core3}@{cbdma10},lcore{core3}@{cbdma11},lcore{core3}@{cbdma12}," - f"lcore{core3}@{cbdma13},lcore{core3}@{cbdma14},lcore{core3}@{cbdma15}," - f"lcore{core4}@{cbdma16}]" - ) - eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=8,dmas={}'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=8,dmas={}'".format(dmas) + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[2], + self.cbdma_list[6], + self.vhost_core_list[2], + self.cbdma_list[7], + self.vhost_core_list[3], + self.cbdma_list[8], + self.vhost_core_list[3], + self.cbdma_list[9], + self.vhost_core_list[3], + self.cbdma_list[10], + self.vhost_core_list[3], + self.cbdma_list[11], + self.vhost_core_list[3], + self.cbdma_list[12], + 
self.vhost_core_list[3], + self.cbdma_list[13], + self.vhost_core_list[3], + self.cbdma_list[14], + self.vhost_core_list[4], + self.cbdma_list[15], + ) + ) + eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=8,tso=1,dmas=[txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5]'" + ) param = ( " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -835,23 +1072,72 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - def test_vm2vm_packed_ring_with_mergeable_path_16queue_check_large_packet_and_cbdma_enable( + def test_vm2vm_virtio_net_packed_ring_mergeable_16_queues_cbdma_enable_test_with_large_packet_payload_check( self, ): """ Test Case 8: VM2VM virtio-net packed ring mergeable 16 queues CBDMA enabled test with large packet payload valid check """ - self.get_cbdma_ports_info_and_bind_to_dpdk(16, allow_diff_socket=True) - dmas = self.generate_dms_param(16) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:9] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[2], + self.cbdma_list[2], + self.vhost_core_list[2], + self.cbdma_list[3], + self.vhost_core_list[3], + self.cbdma_list[4], + self.vhost_core_list[4], + self.cbdma_list[5], + self.vhost_core_list[4], + self.cbdma_list[6], + self.vhost_core_list[4], + self.cbdma_list[7], + self.vhost_core_list[5], + self.cbdma_list[8], + self.vhost_core_list[5], + self.cbdma_list[9], + self.vhost_core_list[6], + self.cbdma_list[10], + self.vhost_core_list[6], + self.cbdma_list[11], + self.vhost_core_list[7], + self.cbdma_list[12], + self.vhost_core_list[7], + self.cbdma_list[13], + self.vhost_core_list[8], + self.cbdma_list[14], + self.vhost_core_list[8], + self.cbdma_list[15], + ) + ) + eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=16,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7;rxq8;rxq9;rxq10;rxq11;rxq12;rxq13;rxq14;rxq15]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=16,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7;rxq8;rxq9;rxq10;rxq11;rxq12;rxq13;rxq14;rxq15]'" ) - eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=16,dmas={}'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=16,dmas={}'".format(dmas) param = ( " --nb-cores=8 --txd=1024 --rxd=1024 --txq=16 --rxq=16" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -870,96 +1156,129 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.start_iperf() self.get_perf_result() - def test_vm2vm_packed_ring_iperf_with_tso_when_set_ivoa_pa_and_cbdma_enable(self): + def
test_vm2vm_virtio_net_packed_ring_cbdma_enable_test_with_tcp_traffic_when_set_iova_pa( + self, + ): """ - Test Case 9: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic when set iova=pa + Test Case 9: VM2VM virtio-net packed ring CBDMA enable test with tcp traffic when set iova=pa """ - self.get_cbdma_ports_info_and_bind_to_dpdk(2) - dmas = self.generate_dms_param(1) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:3] - ) - eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=1,dmas={}'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=1,dmas={}'".format(dmas) - param = ( - " --nb-cores=2 --txd=1024 --rxd=1024 --txq=1 --rxq=1" - + " --lcore-dma={}".format(lcore_dma) - ) - self.start_vhost_testpmd( - cores=self.vhost_core_list, - ports=self.cbdma_list, - eal_param=eal_param, - param=param, - iova_mode="pa", - ) - self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on" - self.start_vms(server_mode=False, vm_queue=1) - self.config_vm_ip() - self.check_ping_between_vms() - self.check_scp_file_valid_between_vms() - self.start_iperf() - self.get_perf_result() - self.verify_xstats_info_on_vhost() + if not self.check_2M_env: + self.get_cbdma_ports_info_and_bind_to_dpdk(2) + lcore_dma = "lcore%s@%s," "lcore%s@%s" % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[2], + self.cbdma_list[1], + ) + eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=1,tso=1,dmas=[txq0;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=1,tso=1,dmas=[txq0;rxq0]'" + ) + param = ( + " --nb-cores=2 --txd=1024 --rxd=1024 --txq=1 --rxq=1" + + " --lcore-dma=[%s]" % lcore_dma + ) + self.start_vhost_testpmd( + cores=self.vhost_core_list, + ports=self.cbdma_list, + eal_param=eal_param, + param=param, + iova_mode="pa", + ) + self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on" + self.start_vms(server_mode=False, vm_queue=1) + self.config_vm_ip() + self.check_ping_between_vms() + self.check_scp_file_valid_between_vms() + self.start_iperf() + self.get_perf_result() + self.verify_xstats_info_on_vhost() - def test_vm2vm_packed_ring_with_mergeable_path_8queue_check_large_packet_when_set_ivoa_pa_and_cbdma_enable( + def test_vm2vm_virtio_net_packed_ring_mergeable_8_queues_cbdma_enable_and_pa_mode_test_with_large_packet_payload_valid_check( self, ): """ Test Case 10: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable and PA mode test with large packet payload valid check """ - self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True) - dmas = self.generate_dms_param(7) - core1 = self.vhost_core_list[1] - core2 = self.vhost_core_list[2] - core3 = self.vhost_core_list[3] - core4 = self.vhost_core_list[4] - cbdma1 = self.cbdma_list[0] - cbdma2 = self.cbdma_list[1] - cbdma3 = self.cbdma_list[2] - cbdma4 = self.cbdma_list[3] - cbdma5 = self.cbdma_list[4] - cbdma6 = self.cbdma_list[5] - cbdma7 = self.cbdma_list[6] - cbdma8 = self.cbdma_list[7] - cbdma9 = self.cbdma_list[8] - cbdma10 = self.cbdma_list[9] - cbdma11 = self.cbdma_list[10] - cbdma12 = self.cbdma_list[11] - cbdma13 = self.cbdma_list[12] - cbdma14 = self.cbdma_list[13] - cbdma15 = self.cbdma_list[14] - cbdma16 = self.cbdma_list[15] - lcore_dma = ( - f"[lcore{core1}@{cbdma1},lcore{core1}@{cbdma2},lcore{core1}@{cbdma3}," - 
f"lcore{core1}@{cbdma4},lcore{core1}@{cbdma5},lcore{core1}@{cbdma6}," - f"lcore{core2}@{cbdma7},lcore{core2}@{cbdma8}," - f"lcore{core3}@{cbdma9},lcore{core3}@{cbdma10},lcore{core3}@{cbdma11},lcore{core3}@{cbdma12}," - f"lcore{core3}@{cbdma13},lcore{core3}@{cbdma14},lcore{core3}@{cbdma15}," - f"lcore{core4}@{cbdma16}]" - ) - eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=8,dmas={}'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=8,dmas={}'".format(dmas) - param = ( - " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" - + " --lcore-dma={}".format(lcore_dma) - ) - self.start_vhost_testpmd( - cores=self.vhost_core_list, - ports=self.cbdma_list, - eal_param=eal_param, - param=param, - iova_mode="pa", - ) - self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on" - self.start_vms(server_mode=False, vm_queue=8) - self.config_vm_ip() - self.check_ping_between_vms() - for _ in range(1): - self.check_scp_file_valid_between_vms() - self.start_iperf() - self.get_perf_result() + if not self.check_2M_env: + self.get_cbdma_ports_info_and_bind_to_dpdk( + cbdma_num=16, allow_diff_socket=True + ) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[2], + self.cbdma_list[6], + self.vhost_core_list[2], + self.cbdma_list[7], + self.vhost_core_list[3], + self.cbdma_list[8], + self.vhost_core_list[3], + self.cbdma_list[9], + self.vhost_core_list[3], + self.cbdma_list[10], + self.vhost_core_list[3], + self.cbdma_list[11], + self.vhost_core_list[3], + self.cbdma_list[12], + self.vhost_core_list[3], + self.cbdma_list[13], + self.vhost_core_list[3], + self.cbdma_list[14], + self.vhost_core_list[4], + self.cbdma_list[15], + ) + ) + eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=8,tso=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=8,tso=1,dmas=[txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5]'" + ) + param = ( + " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8" + + " --lcore-dma=[%s]" % lcore_dma + ) + self.start_vhost_testpmd( + cores=self.vhost_core_list, + ports=self.cbdma_list, + eal_param=eal_param, + param=param, + iova_mode="pa", + ) + self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on" + self.start_vms(server_mode=False, vm_queue=8) + self.config_vm_ip() + self.check_ping_between_vms() + for _ in range(1): + self.check_scp_file_valid_between_vms() + self.start_iperf() + self.get_perf_result() def stop_all_apps(self): for i in range(len(self.vm)): -- 2.25.1