From: Wei Ling
To: dts@dpdk.org
Cc: Wei Ling
Subject: [dts][PATCH V4 2/2] tests/vm2vm_virtio_user_cbdma: modify testsuite to test virtio dequeue
Date: Tue, 16 Aug 2022 02:05:54 -0400
Message-Id: <20220816060554.3418053-1-weix.ling@intel.com>

Since DPDK 22.07, virtio supports async dequeue for the split and packed
ring paths, so modify the vm2vm_virtio_user_cbdma test suite to cover the
split and packed ring async dequeue feature.

Signed-off-by: Wei Ling
---
 tests/TestSuite_vm2vm_virtio_user_cbdma.py | 868 ++++++++++++---------
 1 file changed, 500 insertions(+), 368 deletions(-)

diff --git a/tests/TestSuite_vm2vm_virtio_user_cbdma.py b/tests/TestSuite_vm2vm_virtio_user_cbdma.py
index 0057047c..47d67846 100644
--- a/tests/TestSuite_vm2vm_virtio_user_cbdma.py
+++ b/tests/TestSuite_vm2vm_virtio_user_cbdma.py
@@ -6,9 +6,9 @@ DPDK Test suite.
Test cases for vm2vm virtio-user -This suite include split virtqueue vm2vm in-order mergeable, -in-order non-mergeable,mergeable, non-mergeable, vector_rx path -test and packed virtqueue vm2vm in-order mergeable, in-order non-mergeable, +This suite include split virtqueue vm2vm in-order mergeable, in-order non-mergeable, +mergeable, non-mergeable, vector_rx path test +and packed virtqueue vm2vm in-order mergeable, in-order non-mergeable, mergeable, non-mergeable path test """ import re @@ -28,8 +28,8 @@ class TestVM2VMVirtioUserCbdma(TestCase): self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) self.cores_list = self.dut.get_core_list(config="all", socket=self.ports_socket) self.vhost_core_list = self.cores_list[0:9] - self.virtio0_core_list = self.cores_list[10:12] - self.virtio1_core_list = self.cores_list[12:14] + self.virtio0_core_list = self.cores_list[10:13] + self.virtio1_core_list = self.cores_list[13:15] self.vhost_user = self.dut.new_session(suite="vhost-user") self.virtio_user0 = self.dut.new_session(suite="virtio-user0") self.virtio_user1 = self.dut.new_session(suite="virtio-user1") @@ -37,7 +37,10 @@ class TestVM2VMVirtioUserCbdma(TestCase): self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) self.virtio_user1_pmd = PmdOutput(self.dut, self.virtio_user1) - self.testpmd_name = self.dut.apps_name["test-pmd"].split("/")[-1] + self.path = self.dut.apps_name["test-pmd"] + self.testpmd_name = self.path.split("/")[-1] + self.app_pdump = self.dut.apps_name["pdump"] + self.pdump_name = self.app_pdump.split("/")[-1] def set_up(self): """ @@ -49,6 +52,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): self.dut.send_expect("rm -rf %s" % self.dump_virtio_pcap, "#") self.dut.send_expect("rm -rf %s" % self.dump_vhost_pcap, "#") self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.dut.send_expect("killall -s INT %s" % self.pdump_name, "#") def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False): """ @@ -88,33 +92,6 @@ class TestVM2VMVirtioUserCbdma(TestCase): 60, ) - @staticmethod - def generate_dms_param(queues): - das_list = [] - for i in range(queues): - das_list.append("txq{}".format(i)) - das_param = "[{}]".format(";".join(das_list)) - return das_param - - @staticmethod - def generate_lcore_dma_param(cbdma_list, core_list): - group_num = int(len(cbdma_list) / len(core_list)) - lcore_dma_list = [] - if len(cbdma_list) == 1: - for core in core_list: - lcore_dma_list.append("lcore{}@{}".format(core, cbdma_list[0])) - elif len(core_list) == 1: - for cbdma in cbdma_list: - lcore_dma_list.append("lcore{}@{}".format(core_list[0], cbdma)) - else: - for cbdma in cbdma_list: - core_list_index = int(cbdma_list.index(cbdma) / group_num) - lcore_dma_list.append( - "lcore{}@{}".format(core_list[core_list_index], cbdma) - ) - lcore_dma_param = "[{}]".format(",".join(lcore_dma_list)) - return lcore_dma_param - def bind_cbdma_device_to_kernel(self): self.dut.send_expect("modprobe ioatdma", "# ") self.dut.send_expect( @@ -179,7 +156,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): fixed_prefix=True, ) - def send_251_960byte_and_32_64byte_pkts(self): + def send_502_960byte_and_64_64byte_pkts(self): """ send 251 960byte and 32 64byte length packets from virtio_user0 testpmd """ @@ -194,28 +171,28 @@ class TestVM2VMVirtioUserCbdma(TestCase): self.virtio_user0_pmd.execute_cmd("start tx_first 1") self.virtio_user0_pmd.execute_cmd("stop") self.vhost_user_pmd.execute_cmd("start") - out = 
self.vhost_user_pmd.execute_cmd("show port stats all") - self.logger.info(out) + self.vhost_user_pmd.execute_cmd("show port stats all") - def send_27_4640byte_and_224_64byte_pkts(self): + def send_502_64byte_and_64_4640byte_pkts(self): """ - send 54 4640byte and 448 64byte length packets from virtio_user0 testpmd + send 502 64byte and 64 4640byte length packets from virtio_user0 testpmd """ self.virtio_user0_pmd.execute_cmd("set burst 1") - self.virtio_user0_pmd.execute_cmd("set txpkts 64,256,2000,64,256,2000") + self.virtio_user0_pmd.execute_cmd("set txpkts 64") self.virtio_user0_pmd.execute_cmd("start tx_first 27") self.virtio_user0_pmd.execute_cmd("stop") self.virtio_user0_pmd.execute_cmd("set burst 32") - self.virtio_user0_pmd.execute_cmd("set txpkts 64") self.virtio_user0_pmd.execute_cmd("start tx_first 7") self.virtio_user0_pmd.execute_cmd("stop") + self.virtio_user0_pmd.execute_cmd("set txpkts 64,256,2000,64,256,2000") + self.virtio_user0_pmd.execute_cmd("start tx_first 1") + self.virtio_user0_pmd.execute_cmd("stop") self.vhost_user_pmd.execute_cmd("start") - out = self.vhost_user_pmd.execute_cmd("show port stats all") - self.logger.info(out) + self.vhost_user_pmd.execute_cmd("show port stats all") - def send_224_64byte_and_27_4640byte_pkts(self): + def send_448_64byte_and_54_4640byte_pkts(self): """ - send 54 4640byte and 448 64byte length packets from virtio_user0 testpmd + send 448 64byte and 54 4640byte length packets from virtio_user0 testpmd """ self.virtio_user0_pmd.execute_cmd("set burst 32") self.virtio_user0_pmd.execute_cmd("set txpkts 64") @@ -226,14 +203,14 @@ class TestVM2VMVirtioUserCbdma(TestCase): self.virtio_user0_pmd.execute_cmd("start tx_first 27") self.virtio_user0_pmd.execute_cmd("stop") self.vhost_user_pmd.execute_cmd("start") - out = self.vhost_user_pmd.execute_cmd("show port stats all") - self.logger.info(out) + self.vhost_user_pmd.execute_cmd("show port stats all") - def send_251_64byte_and_32_8000byte_pkts(self): + def send_502_64byte_and_64_8000byte_pkts(self): """ send 54 4640byte and 448 64byte length packets from virtio_user0 testpmd """ self.virtio_user0_pmd.execute_cmd("set burst 1") + self.virtio_user0_pmd.execute_cmd("set txpkts 64") self.virtio_user0_pmd.execute_cmd("start tx_first 27") self.virtio_user0_pmd.execute_cmd("stop") self.virtio_user0_pmd.execute_cmd("set burst 32") @@ -243,16 +220,32 @@ class TestVM2VMVirtioUserCbdma(TestCase): self.virtio_user0_pmd.execute_cmd("start tx_first 1") self.virtio_user0_pmd.execute_cmd("stop") self.vhost_user_pmd.execute_cmd("start") - out = self.vhost_user_pmd.execute_cmd("show port stats all") - self.logger.info(out) + self.vhost_user_pmd.execute_cmd("show port stats all") + + def send_54_4640byte_and_448_64byte_pkts(self): + """ + send 54 4640byte and 448 64byte length packets from virtio_user0 testpmd + """ + self.virtio_user0_pmd.execute_cmd("set burst 1") + self.virtio_user0_pmd.execute_cmd("set txpkts 64,256,2000,64,256,2000") + self.virtio_user0_pmd.execute_cmd("start tx_first 27") + self.virtio_user0_pmd.execute_cmd("stop") + self.virtio_user0_pmd.execute_cmd("set burst 32") + self.virtio_user0_pmd.execute_cmd("set txpkts 64") + self.virtio_user0_pmd.execute_cmd("start tx_first 7") + self.virtio_user0_pmd.execute_cmd("stop") + self.vhost_user_pmd.execute_cmd("start") + self.vhost_user_pmd.execute_cmd("show port stats all") def send_1_64byte_pkts(self): + """ + send 1 64byte length packets from virtio_user0 testpmd + """ self.virtio_user0_pmd.execute_cmd("set burst 1") 
self.virtio_user0_pmd.execute_cmd("start tx_first 1") self.virtio_user0_pmd.execute_cmd("stop") self.vhost_user_pmd.execute_cmd("start") - out = self.vhost_user_pmd.execute_cmd("show port stats all") - self.logger.info(out) + self.vhost_user_pmd.execute_cmd("show port stats all") def clear_virtio_user1_stats(self): self.virtio_user1_pmd.execute_cmd("stop") @@ -268,17 +261,12 @@ class TestVM2VMVirtioUserCbdma(TestCase): if dump the vhost-testpmd, the vhost-testpmd should started before launch pdump if dump the virtio-testpmd, the virtio-testpmd should started before launch pdump """ - eal_params = self.dut.create_eal_parameters( - cores="Default", prefix="virtio-user1", fixed_prefix=True, ports=[] - ) command_line = ( self.app_pdump - + " %s -v -- " + + "-l 1-2 -n 4 --file-prefix=virtio-user1 -v -- " + "--pdump 'device_id=net_virtio_user1,queue=*,rx-dev=%s,mbuf-size=8000'" ) - self.pdump_user.send_expect( - command_line % (eal_params, self.dump_virtio_pcap), "Port" - ) + self.pdump_user.send_expect(command_line % (self.dump_virtio_pcap), "Port") def check_virtio_user1_stats(self, check_dict): """ @@ -328,38 +316,32 @@ class TestVM2VMVirtioUserCbdma(TestCase): "pdump file: {} have not include enough packets {}".format(count, key), ) - def check_vhost_user_testpmd_logs(self): - out = self.vhost_user.get_session_before(timeout=30) - check_logs = [ - "DMA completion failure on channel", - "DMA copy failed for channel", - ] - for check_log in check_logs: - self.verify(check_log not in out, "Vhost-user testpmd Exception") - def quit_all_testpmd(self): - self.vhost_user_pmd.quit() - self.virtio_user0_pmd.quit() - self.virtio_user1_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) + self.virtio_user0_pmd.execute_cmd("quit", "#", 60) + self.virtio_user1_pmd.execute_cmd("quit", "#", 60) self.pdump_user.send_expect("^c", "# ", 60) - def test_split_ring_non_mergeable_path_multi_queues_with_cbdma(self): + def test_split_ring_non_mergeable_path_multi_queues_payload_check_with_cbdma_enable( + self, + ): """ - Test Case 1: split virtqueue vm2vm non-mergeable path multi-queues payload check with cbdma enable + Test Case 1: VM2VM split ring non-mergeable path multi-queues payload check with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(2) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2) + lcore_dma = "lcore%s@%s," "lcore%s@%s" % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;txq1;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;rxq0;rxq1]'" ) vhost_param = ( - " --nb-cores=1 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -369,7 +351,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): iova_mode="va", ) - virtio1_eal_param = 
"--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,packed_vq=0,mrg_rxbuf=0,in_order=0,queue_size=4096" + virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,mrg_rxbuf=0,in_order=0,queue_size=4096" virtio1_param = ( " --enable-hw-vlan-strip --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" ) @@ -380,7 +362,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=0,mrg_rxbuf=0,in_order=0,queue_size=4096" + virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,mrg_rxbuf=0,in_order=0,queue_size=4096" virtio0_param = ( " --enable-hw-vlan-strip --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" ) @@ -390,15 +372,13 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_251_960byte_and_32_64byte_pkts() + self.send_502_960byte_and_64_64byte_pkts() check_dict = {960: 502, 64: 64} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - self.check_vhost_user_testpmd_logs() if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) self.clear_virtio_user1_stats() self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -409,95 +389,44 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - self.send_251_960byte_and_32_64byte_pkts() + self.send_502_960byte_and_64_64byte_pkts() check_dict = {960: 502, 64: 64} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - self.check_vhost_user_testpmd_logs() - def test_split_ring_mergeable_path_multi_queues_with_cbdma(self): + def test_split_ping_inorder_non_mergeable_path_multi_queues_payload_check_with_cbdma_enable( + self, + ): """ - Test Case 2: split virtqueue vm2vm mergeable path multi-queues payload check with cbdma enable + Test Case 2: VM2VM split ring inorder non-mergeable path multi-queues payload check with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(1) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list[0:1], core_list=self.vhost_core_list[1:2] - ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) - vhost_param = ( - " --nb-cores=1 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) - ) - self.start_vhost_testpmd( - cores=self.vhost_core_list, - eal_param=vhost_eal_param, - param=vhost_param, - ports=self.cbdma_list, - iova_mode="va", - ) - - virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=0,queue_size=4096" - virtio1_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" - self.start_virtio_testpmd_with_vhost_net1( - cores=self.virtio1_core_list, - eal_param=virtio1_eal_param, - param=virtio1_param, - ) - self.start_pdump_to_capture_pkt() - - virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=0,queue_size=4096" - virtio0_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" - 
self.start_virtio_testpmd_with_vhost_net0( - cores=self.virtio0_core_list, - eal_param=virtio0_eal_param, - param=virtio0_param, - ) - - self.send_27_4640byte_and_224_64byte_pkts() - check_dict = {4640: 54, 64: 448} - self.check_virtio_user1_stats(check_dict) - self.check_packet_payload_valid(check_dict) - - if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() - self.clear_virtio_user1_stats() - self.start_vhost_testpmd( - cores=self.vhost_core_list, - eal_param=vhost_eal_param, - param=vhost_param, - ports=self.cbdma_list, - iova_mode="pa", + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=5) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], ) - self.start_pdump_to_capture_pkt() - - self.send_27_4640byte_and_224_64byte_pkts() - check_dict = {4640: 54, 64: 448} - self.check_virtio_user1_stats(check_dict) - self.check_packet_payload_valid(check_dict) - - def test_split_ring_inorder_non_mergeable_path_multi_queues_with_cbdma(self): - """ - Test Case 3: split virtqueue vm2vm inorder non-mergeable path multi-queues payload check with cbdma enable - """ - self.get_cbdma_ports_info_and_bind_to_dpdk(5) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;txq1;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;rxq0;rxq1]'" ) vhost_param = ( - " --nb-cores=1 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -507,7 +436,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): iova_mode="va", ) - virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,packed_vq=0,mrg_rxbuf=0,in_order=1,queue_size=4096" + virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,mrg_rxbuf=0,in_order=1,queue_size=4096" virtio1_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" self.start_virtio_testpmd_with_vhost_net1( cores=self.virtio1_core_list, @@ -516,7 +445,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=0,mrg_rxbuf=0,in_order=1,queue_size=4096" + virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,mrg_rxbuf=0,in_order=1,queue_size=4096" virtio0_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" self.start_virtio_testpmd_with_vhost_net0( cores=self.virtio0_core_list, @@ -524,14 +453,13 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_27_4640byte_and_224_64byte_pkts() - check_dict = {64: 448} + 
self.send_502_64byte_and_64_4640byte_pkts() + check_dict = {64: 502, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) self.clear_virtio_user1_stats() self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -542,26 +470,53 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - self.send_27_4640byte_and_224_64byte_pkts() - check_dict = {64: 448} + self.send_502_64byte_and_64_4640byte_pkts() + check_dict = {64: 502, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_split_ring_vectorized_path_multi_queues_with_cbdma(self): + def test_split_ring_verctorized_path_multi_queues_payload_check_with_cbdma_enabled( + self, + ): """ - Test Case 4: split virtqueue vm2vm vectorized path multi-queues payload check with cbdma enable + Test Case 3: VM2VM split ring vectorized path multi-queues payload check with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(8) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[1], + self.cbdma_list[6], + self.vhost_core_list[1], + self.cbdma_list[7], + ) ) vhost_eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0],dma_ring_size=2048'" - + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq1],dma_ring_size=2048'" + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;txq1;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;rxq0;rxq1]'" ) vhost_param = ( - " --nb-cores=1 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -580,7 +535,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,mrg_rxbuf=0,in_order=0,vectorized=1,queue_size=4096" + virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,mrg_rxbuf=0,in_order=0,,vectorized=1,queue_size=4096" virtio0_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" self.start_virtio_testpmd_with_vhost_net0( cores=self.virtio0_core_list, @@ -588,14 +543,13 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() + 
self.vhost_user_pmd.execute_cmd("quit", "#", 60) self.clear_virtio_user1_stats() self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -606,30 +560,41 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_split_ring_inorder_mergeable_path_multi_queues_test_non_indirect_descriptor_with_cbdma( + def test_split_ring_inorder_mergeable_path_test_non_indirect_descriptor_with_cbdma_enabled( self, ): """ - Test Case 5: Split virtqueue vm2vm inorder mergeable path test non-indirect descriptor with cbdma enable + Test Case 4: VM2VM split ring inorder mergeable path test non-indirect descriptor with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(4) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + ) ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;txq1;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;rxq0;rxq1]'" ) vhost_param = ( - " --nb-cores=1 --txd=256 --rxd=256 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --txq=2 --rxq=2 --txd=256 --rxd=256 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -639,7 +604,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): iova_mode="va", ) - virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=1,queue_size=256" + virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,mrg_rxbuf=1,in_order=1,queue_size=256" virtio1_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=256 --rxd=256" self.start_virtio_testpmd_with_vhost_net1( cores=self.virtio1_core_list, @@ -648,7 +613,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=1,queue_size=256" + virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,mrg_rxbuf=1,in_order=1,queue_size=256" virtio0_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=256 --rxd=256" self.start_virtio_testpmd_with_vhost_net0( cores=self.virtio0_core_list, @@ -656,16 +621,15 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_251_64byte_and_32_8000byte_pkts() + self.send_502_64byte_and_64_8000byte_pkts() check_dict = {64: 502, 8000: 2} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch 
vhost with iova=pa") - self.vhost_user_pmd.quit() - self.virtio_user1_pmd.quit() - self.virtio_user0_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) + self.virtio_user1_pmd.execute_cmd("quit", "#", 60) + self.virtio_user0_pmd.execute_cmd("quit", "#", 60) self.start_vhost_testpmd( cores=self.vhost_core_list, eal_param=vhost_eal_param, @@ -679,36 +643,46 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio1_param, ) self.start_pdump_to_capture_pkt() + self.start_virtio_testpmd_with_vhost_net0( cores=self.virtio0_core_list, eal_param=virtio0_eal_param, param=virtio0_param, ) - self.send_251_64byte_and_32_8000byte_pkts() + self.send_502_64byte_and_64_8000byte_pkts() check_dict = {64: 502, 8000: 2} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_split_ring_inorder_mergeable_path_multi_queues_test_indirect_descriptor_with_cbdma( - self, - ): + def test_split_ring_mergeable_path_test_indirect_descriptor_with_cbdma_enable(self): """ - Test Case 6: Split virtqueue vm2vm mergeable path test indirect descriptor with cbdma enable + Test Case 5: VM2VM split ring mergeable path test indirect descriptor with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(4) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + ) ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;rxq0;rxq1]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;txq1;rxq0]'" ) vhost_param = ( - " --nb-cores=1 --txd=256 --rxd=256 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --rxq=2 --txq=2 --txd=256 --rxd=256 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -718,7 +692,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): iova_mode="va", ) - virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=0,queue_size=256" + virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,mrg_rxbuf=1,in_order=0,queue_size=256" virtio1_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=256 --rxd=256" self.start_virtio_testpmd_with_vhost_net1( cores=self.virtio1_core_list, @@ -727,7 +701,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=0,queue_size=256" + virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,mrg_rxbuf=1,in_order=0,queue_size=256" virtio0_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=256 --rxd=256" self.start_virtio_testpmd_with_vhost_net0( cores=self.virtio0_core_list, @@ -735,16 +709,19 @@ class 
TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_251_64byte_and_32_8000byte_pkts() + self.send_502_64byte_and_64_8000byte_pkts() check_dict = {64: 502, 8000: 10} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() - self.virtio_user1_pmd.quit() - self.virtio_user0_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) + self.virtio_user1_pmd.execute_cmd("quit", "#", 60) + self.virtio_user0_pmd.execute_cmd("quit", "#", 60) + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;txq1;rxq0;rxq1]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;txq1;rxq0;rxq1]'" + ) self.start_vhost_testpmd( cores=self.vhost_core_list, eal_param=vhost_eal_param, @@ -764,28 +741,31 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_251_64byte_and_32_8000byte_pkts() + self.send_502_64byte_and_64_8000byte_pkts() check_dict = {64: 502, 8000: 10} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_packed_ring_non_mergeable_path_multi_queues_with_cbdma(self): + def test_packed_ring_non_mergeable_path_multi_queues_payload_check_with_cbdma_enable( + self, + ): """ - Test Case 7: packed virtqueue vm2vm non-mergeable path multi-queues payload check with cbdma enable + Test Case 6: VM2VM packed ring non-mergeable path multi-queues payload check with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(2) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2) + lcore_dma = "lcore%s@%s," "lcore%s@%s" % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;txq1;rxq0;rxq1]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;txq1;rxq0;rxq1]'" ) vhost_param = ( - " --nb-cores=1 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -796,7 +776,9 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,packed_vq=1,mrg_rxbuf=0,in_order=0,queue_size=4096" - virtio1_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" + virtio1_param = ( + " --enable-hw-vlan-strip --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" + ) self.start_virtio_testpmd_with_vhost_net1( cores=self.virtio1_core_list, eal_param=virtio1_eal_param, @@ -805,22 +787,27 @@ class TestVM2VMVirtioUserCbdma(TestCase): self.start_pdump_to_capture_pkt() virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=1,mrg_rxbuf=0,in_order=0,queue_size=4096" - virtio0_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" + virtio0_param = ( + " --enable-hw-vlan-strip --nb-cores=1 
--rxq=2 --txq=2 --txd=4096 --rxd=4096" + ) self.start_virtio_testpmd_with_vhost_net0( cores=self.virtio0_core_list, eal_param=virtio0_eal_param, param=virtio0_param, ) - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) self.clear_virtio_user1_stats() + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;rxq0;rxq1]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;txq1;rxq0]'" + ) self.start_vhost_testpmd( cores=self.vhost_core_list, eal_param=vhost_eal_param, @@ -830,28 +817,26 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_packed_ring_mergeable_path_multi_queues_with_cbdma(self): + def test_packed_ring_mergeable_path_multi_queues_payload_check_with_cbdma_enabled( + self, + ): """ - Test Case 8: packed virtqueue vm2vm mergeable path multi-queues payload check with cbdma enable + Test Case 7: VM2VM packed ring mergeable path multi-queues payload check with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(1) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list[0:1], core_list=self.vhost_core_list[1:2] - ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1) + lcore_dma = "lcore%s@%s," % (self.vhost_core_list[1], self.cbdma_list[0]) + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;rxq0;rxq1]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;txq1;rxq0]'" ) vhost_param = ( - " --nb-cores=1 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -878,15 +863,18 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_27_4640byte_and_224_64byte_pkts() + self.send_54_4640byte_and_448_64byte_pkts() check_dict = {4640: 54, 64: 448} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) self.clear_virtio_user1_stats() + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;txq1;rxq0;rxq1]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;txq1;rxq0;rxq1]'" + ) self.start_vhost_testpmd( cores=self.vhost_core_list, eal_param=vhost_eal_param, @@ -896,28 +884,44 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - self.send_27_4640byte_and_224_64byte_pkts() + 
self.send_54_4640byte_and_448_64byte_pkts() check_dict = {4640: 54, 64: 448} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_packed_ring_inorder_mergeable_path_multi_queues_with_cbdma(self): + def test_packed_ring_inorder_mergeable_path_multi_queues_payload_check_with_cbdma_enabled( + self, + ): """ - Test Case 9: packed virtqueue vm2vm inorder mergeable path multi-queues payload check with cbdma enable + Test Case 8: VM2VM packed ring inorder mergeable path multi-queues payload check with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(5) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=5) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + ) ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;rxq0;rxq1]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;txq1;rxq0]'" ) vhost_param = ( " --nb-cores=1 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -944,14 +948,13 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_27_4640byte_and_224_64byte_pkts() + self.send_54_4640byte_and_448_64byte_pkts() check_dict = {4640: 54, 64: 448} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) self.clear_virtio_user1_stats() self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -962,28 +965,53 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - self.send_27_4640byte_and_224_64byte_pkts() + self.send_54_4640byte_and_448_64byte_pkts() check_dict = {4640: 54, 64: 448} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_packed_ring_inorder_non_mergeable_path_multi_queues_with_cbdma(self): + def test_packed_ring_inorder_non_mergeable_path_multi_queues_payload_check_with_cbdma_enabled( + self, + ): """ - Test Case 10: packed virtqueue vm2vm inorder non-mergeable path multi-queues payload check with cbdma enable + Test Case 9: VM2VM packed ring inorder non-mergeable path multi-queues payload check with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(8) - dmas = self.generate_dms_param(1) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + 
self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[1], + self.cbdma_list[6], + self.vhost_core_list[1], + self.cbdma_list[7], + ) ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq1;rxq1]'" ) vhost_param = ( - " --nb-cores=1 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -1002,7 +1030,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=1,mrg_rxbuf=0,in_order=1,queue_size=4096" + virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,mrg_rxbuf=0,in_order=0,vectorized=1,queue_size=4096" virtio0_param = " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 --rxd=4096" self.start_virtio_testpmd_with_vhost_net0( cores=self.virtio0_core_list, @@ -1010,14 +1038,13 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) self.clear_virtio_user1_stats() self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -1028,26 +1055,53 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_packed_ring_inorder_vectorized_rx_path_multi_queues_with_cbdma(self): + def test_packed_ring_vectorized_rx_path_multi_queues_payload_check_with_cbdma_enabled( + self, + ): """ - Test Case 11: packed virtqueue vm2vm vectorized-rx path multi-queues payload check with cbdma enable + Test Case 10: VM2VM packed ring vectorized-rx path multi-queues payload check with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(8) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:3] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[2], + self.cbdma_list[4], + self.vhost_core_list[2], + self.cbdma_list[5], + 
self.vhost_core_list[2], + self.cbdma_list[6], + self.vhost_core_list[2], + self.cbdma_list[7], + ) ) vhost_eal_param = ( - "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0],dma_ring_size=2048'" - + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq1],dma_ring_size=2048'" + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq1;rxq1]'" ) vhost_param = ( - " --nb-cores=2 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=2 --rxq=2 --txq=2 --txd=4096 --rxd=4096 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -1074,14 +1128,13 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) self.clear_virtio_user1_stats() self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -1092,30 +1145,53 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_packed_ring_inorder_vectorized_path_multi_queues_check_with_ring_size_is_not_power_of_2_queues_with_cbdma( + def test_packed_ring_vectorized_path_multi_queues_payload_check_with_cbdma_enabled( self, ): """ - Test Case 12: packed virtqueue vm2vm vectorized path multi-queues payload check with ring size is not power of 2 and cbdma enabled + Test Case 11: VM2VM packed ring vectorized path multi-queues payload check test with ring size is not power of 2 with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(8) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[1], + self.cbdma_list[6], + self.vhost_core_list[1], + self.cbdma_list[7], + ) ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0;txq1;rxq0;rxq1]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;txq1;rxq0;rxq1]'" ) vhost_param = ( - " --nb-cores=1 --txd=4096 --rxd=4096 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --rxq=2 --txq=2 --txd=4096 
--rxd=4096 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -1142,15 +1218,18 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - self.vhost_user_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) self.clear_virtio_user1_stats() + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq1]'" + ) self.start_vhost_testpmd( cores=self.vhost_core_list, eal_param=vhost_eal_param, @@ -1160,30 +1239,53 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - self.send_224_64byte_and_27_4640byte_pkts() - check_dict = {64: 448} + self.send_448_64byte_and_54_4640byte_pkts() + check_dict = {64: 448, 4640: 0} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_packed_ring_vectorized_tx_path_multi_queues_test_indirect_descriptor_with_cbdma( + def test_pakced_packed_ring_vectorized_tx_path_multi_queues_test_indirect_descriptor_with_cbdma_enabled( self, ): """ - Test Case 13: packed virtqueue vm2vm vectorized-tx path multi-queues test indirect descriptor with cbdma enable + Test Case 12: VM2VM packed ring vectorized-tx path multi-queues test indirect descriptor and payload check with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(8) - dmas = self.generate_dms_param(2) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[1], + self.cbdma_list[6], + self.vhost_core_list[1], + self.cbdma_list[7], + ) ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq1]'" ) vhost_param = ( - " --nb-cores=1 --txd=256 --rxd=256 --txq=2 --rxq=2 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + " --nb-cores=1 --rxq=2 --txq=2 --txd=256 --rxd=256 --no-flush-rx" + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -1210,16 +1312,19 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_251_64byte_and_32_8000byte_pkts() + self.send_502_64byte_and_64_8000byte_pkts() check_dict = {64: 502, 8000: 10} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) if not self.check_2M_env: - self.logger.info("Quit and relaunch vhost with iova=pa") - 
self.vhost_user_pmd.quit() - self.virtio_user1_pmd.quit() - self.virtio_user0_pmd.quit() + self.vhost_user_pmd.execute_cmd("quit", "#", 60) + self.virtio_user1_pmd.execute_cmd("quit", "#", 60) + self.virtio_user0_pmd.execute_cmd("quit", "#", 60) + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[rxq0;rxq1]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0;txq1]'" + ) self.start_vhost_testpmd( cores=self.vhost_core_list, eal_param=vhost_eal_param, @@ -1239,28 +1344,53 @@ class TestVM2VMVirtioUserCbdma(TestCase): param=virtio0_param, ) - self.send_251_64byte_and_32_8000byte_pkts() + self.send_502_64byte_and_64_8000byte_pkts() check_dict = {64: 502, 8000: 10} self.check_virtio_user1_stats(check_dict) self.check_packet_payload_valid(check_dict) - def test_packed_ring_vectorized_tx_path_test_batch_processing_with_cbdma(self): + def test_packed_ring_vectorized_tx_path_test_batch_processing_with_cbdma_enabled( + self, + ): """ - Test Case 14: packed virtqueue vm2vm vectorized-tx path test batch processing with cbdma enable + Test Case 13: VM2VM packed ring vectorized-tx path test batch processing with cbdma enable """ - self.get_cbdma_ports_info_and_bind_to_dpdk(8) - dmas = self.generate_dms_param(1) - lcore_dma = self.generate_lcore_dma_param( - cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:2] + self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8) + lcore_dma = ( + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s," + "lcore%s@%s" + % ( + self.vhost_core_list[1], + self.cbdma_list[0], + self.vhost_core_list[1], + self.cbdma_list[1], + self.vhost_core_list[1], + self.cbdma_list[2], + self.vhost_core_list[1], + self.cbdma_list[3], + self.vhost_core_list[1], + self.cbdma_list[4], + self.vhost_core_list[1], + self.cbdma_list[5], + self.vhost_core_list[1], + self.cbdma_list[6], + self.vhost_core_list[1], + self.cbdma_list[7], + ) ) - vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=1,client=1,dmas={},dma_ring_size=2048'".format( - dmas - ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=1,client=1,dmas={},dma_ring_size=2048'".format( - dmas + vhost_eal_param = ( + "--vdev 'net_vhost0,iface=vhost-net0,queues=1,client=1,dmas=[txq0;rxq0]'" + + " --vdev 'net_vhost1,iface=vhost-net1,queues=1,client=1,dmas=[txq0;rxq0]'" ) vhost_param = ( " --nb-cores=1 --txd=256 --rxd=256 --txq=1 --rxq=1 --no-flush-rx" - + " --lcore-dma={}".format(lcore_dma) + + " --lcore-dma=[%s]" % lcore_dma ) self.start_vhost_testpmd( cores=self.vhost_core_list, @@ -1270,7 +1400,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): iova_mode="va", ) - virtio1_eal_param = "--force-max-simd-bitwidth=512 --vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,packed_vq=1,mrg_rxbuf=1,in_order=1,vectorized=1,queue_size=256" + virtio1_eal_param = "--force-max-simd-bitwidth=512 --vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=1,server=1,packed_vq=1,mrg_rxbuf=1,in_order=1,vectorized=1,queue_size=256" virtio1_param = " --nb-cores=1 --rxq=1 --txq=1 --txd=256 --rxd=256" self.start_virtio_testpmd_with_vhost_net1( cores=self.virtio1_core_list, @@ -1279,7 +1409,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.start_pdump_to_capture_pkt() - virtio0_eal_param = "--force-max-simd-bitwidth=512 --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=1,mrg_rxbuf=1,in_order=1,vectorized=1,queue_size=256" + virtio0_eal_param = 
"--force-max-simd-bitwidth=512 --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=1,server=1,packed_vq=1,mrg_rxbuf=1,in_order=1,vectorized=1,queue_size=256" virtio0_param = " --nb-cores=1 --rxq=1 --txq=1 --txd=256 --rxd=256" self.start_virtio_testpmd_with_vhost_net0( cores=self.virtio0_core_list, @@ -1308,6 +1438,8 @@ class TestVM2VMVirtioUserCbdma(TestCase): """ self.quit_all_testpmd() self.dut.kill_all() + self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.dut.send_expect("killall -s INT %s" % self.pdump_name, "#") self.bind_cbdma_device_to_kernel() def tear_down_all(self): -- 2.25.1