From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 8009CA0558; Thu, 26 May 2022 15:12:22 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 5814A40151; Thu, 26 May 2022 15:12:22 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 5C9AD40150 for ; Thu, 26 May 2022 15:12:19 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id D7E151FF572; Thu, 26 May 2022 15:12:18 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id 3rparurr9apW; Thu, 26 May 2022 15:12:01 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 216181C348A; Thu, 26 May 2022 15:11:39 +0200 (CEST) From: =?UTF-8?q?Juraj=20Linke=C5=A1?= To: Honnappa.Nagarahalli@arm.com, juraj.linkes@pantheon.tech, lijuan.tu@intel.com, ohilyard@iol.unh.edu Cc: dts@dpdk.org Subject: [PATCH v2 5/5] rename base classes 5 Date: Thu, 26 May 2022 13:11:35 +0000 Message-Id: <20220526131135.1643352-6-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220526131135.1643352-1-juraj.linkes@pantheon.tech> References: <20220526131135.1643352-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org tests/* Signed-off-by: Juraj Linkeš --- tests/TestSuite_acl.py | 16 +- tests/TestSuite_af_xdp_2.py | 72 +- tests/TestSuite_asan_smoke.py | 34 +- 
tests/TestSuite_basic_4k_pages_cbdma.py | 138 +-- tests/TestSuite_blocklist.py | 18 +- tests/TestSuite_cbdma.py | 82 +- tests/TestSuite_checksum_offload.py | 286 ++--- tests/TestSuite_cloud_filter_with_l4_port.py | 86 +- tests/TestSuite_cmdline.py | 28 +- tests/TestSuite_compressdev_isal_pmd.py | 2 +- tests/TestSuite_compressdev_qat_pmd.py | 2 +- tests/TestSuite_compressdev_zlib_pmd.py | 2 +- tests/TestSuite_coremask.py | 26 +- tests/TestSuite_crypto_perf_cryptodev_perf.py | 42 +- tests/TestSuite_dcf_lifecycle.py | 190 +-- tests/TestSuite_ddp_gtp.py | 108 +- tests/TestSuite_ddp_gtp_qregion.py | 110 +- tests/TestSuite_ddp_l2tpv3.py | 144 +-- tests/TestSuite_ddp_mpls.py | 90 +- tests/TestSuite_ddp_ppp_l2tp.py | 86 +- tests/TestSuite_distributor.py | 96 +- tests/TestSuite_dpdk_gro_lib.py | 260 ++-- tests/TestSuite_dpdk_gro_lib_cbdma.py | 128 +- tests/TestSuite_dpdk_gso_lib.py | 296 ++--- tests/TestSuite_dpdk_hugetlbfs_mount_size.py | 126 +- tests/TestSuite_dual_vlan.py | 192 +-- tests/TestSuite_dynamic_config.py | 130 +- tests/TestSuite_dynamic_flowtype.py | 72 +- tests/TestSuite_dynamic_queue.py | 92 +- tests/TestSuite_eeprom_dump.py | 36 +- tests/TestSuite_efd.py | 76 +- ...e_enable_package_download_in_ice_driver.py | 188 +-- tests/TestSuite_ethtool_stats.py | 36 +- tests/TestSuite_eventdev_perf.py | 512 ++++---- tests/TestSuite_eventdev_pipeline.py | 58 +- tests/TestSuite_eventdev_pipeline_perf.py | 278 ++--- tests/TestSuite_example_build.py | 6 +- tests/TestSuite_external_memory.py | 82 +- tests/TestSuite_external_mempool_handler.py | 32 +- tests/TestSuite_fips_cryptodev.py | 10 +- tests/TestSuite_firmware_version.py | 10 +- tests/TestSuite_flexible_rxd.py | 26 +- tests/TestSuite_floating_veb.py | 194 +-- tests/TestSuite_flow_classify.py | 74 +- tests/TestSuite_flow_classify_softnic.py | 537 ++++----- tests/TestSuite_flow_filtering.py | 40 +- tests/TestSuite_generic_flow_api.py | 682 +++++------ tests/TestSuite_hello_world.py | 16 +- tests/TestSuite_hotplug.py | 
126 +- tests/TestSuite_hotplug_mp.py | 46 +- tests/TestSuite_i40e_rss_input.py | 1074 ++++++++--------- tests/TestSuite_iavf.py | 343 +++--- tests/TestSuite_iavf_flexible_descriptor.py | 48 +- ...tSuite_iavf_package_driver_error_handle.py | 172 +-- tests/TestSuite_ice_1pps_signal.py | 14 +- tests/TestSuite_ice_advanced_iavf_rss.py | 74 +- ...TestSuite_ice_advanced_iavf_rss_gtpogre.py | 44 +- tests/TestSuite_ice_advanced_iavf_rss_gtpu.py | 52 +- ...uite_ice_advanced_iavf_rss_pppol2tpoudp.py | 52 +- ...advanced_iavf_rss_vlan_esp_ah_l2tp_pfcp.py | 72 +- tests/TestSuite_ice_advanced_rss.py | 54 +- tests/TestSuite_ice_advanced_rss_gtpogre.py | 36 +- tests/TestSuite_ice_advanced_rss_gtpu.py | 36 +- tests/TestSuite_ice_advanced_rss_pppoe.py | 38 +- ..._ice_advanced_rss_vlan_esp_ah_l2tp_pfcp.py | 44 +- tests/TestSuite_ice_dcf_acl_filter.py | 160 +-- tests/TestSuite_ice_dcf_date_path.py | 62 +- tests/TestSuite_ice_dcf_flow_priority.py | 110 +- tests/TestSuite_ice_dcf_qos.py | 69 +- tests/TestSuite_ice_dcf_switch_filter.py | 202 ++-- tests/TestSuite_ice_dcf_switch_filter_gtpu.py | 74 +- .../TestSuite_ice_dcf_switch_filter_pppoe.py | 104 +- tests/TestSuite_ice_ecpri.py | 100 +- tests/TestSuite_ice_fdir.py | 88 +- tests/TestSuite_ice_flow_priority.py | 32 +- tests/TestSuite_ice_iavf_fdir.py | 442 +++---- tests/TestSuite_ice_iavf_fdir_gtpogre.py | 64 +- tests/TestSuite_ice_iavf_fdir_pppol2tpoudp.py | 52 +- ...TestSuite_ice_iavf_ip_fragment_rte_flow.py | 58 +- tests/TestSuite_ice_iavf_rss_configure.py | 42 +- tests/TestSuite_ice_ip_fragment_rte_flow.py | 46 +- tests/TestSuite_ice_limit_value_test.py | 250 ++-- tests/TestSuite_ice_qinq.py | 134 +- tests/TestSuite_ice_rss_configure.py | 22 +- tests/TestSuite_ice_switch_filter.py | 196 +-- tests/TestSuite_ice_switch_filter_pppoe.py | 174 +-- ...tSuite_ice_vf_support_multicast_address.py | 120 +- tests/TestSuite_ieee1588.py | 76 +- tests/TestSuite_inline_ipsec.py | 94 +- tests/TestSuite_interrupt_pmd.py | 54 +- 
tests/TestSuite_ip_pipeline.py | 339 +++--- tests/TestSuite_ipfrag.py | 106 +- tests/TestSuite_ipgre.py | 94 +- tests/TestSuite_ipsec_gw_cryptodev_func.py | 62 +- tests/TestSuite_ipv4_reassembly.py | 108 +- ...te_ixgbe_vf_get_extra_queue_information.py | 104 +- tests/TestSuite_jumboframes.py | 74 +- tests/TestSuite_keep_alive.py | 32 +- tests/TestSuite_kernelpf_iavf.py | 350 +++--- tests/TestSuite_kni.py | 464 +++---- tests/TestSuite_l2fwd.py | 108 +- tests/TestSuite_l2fwd_cryptodev_func.py | 68 +- tests/TestSuite_l2fwd_jobstats.py | 36 +- tests/TestSuite_l2tp_esp_coverage.py | 224 ++-- tests/TestSuite_l3fwd.py | 10 +- tests/TestSuite_l3fwd_func.py | 86 +- tests/TestSuite_l3fwd_lpm_ipv4.py | 10 +- tests/TestSuite_l3fwd_lpm_ipv4_rfc2544.py | 10 +- tests/TestSuite_l3fwd_lpm_ipv6.py | 10 +- tests/TestSuite_l3fwdacl.py | 216 ++-- tests/TestSuite_large_vf.py | 74 +- tests/TestSuite_link_flowctrl.py | 130 +- tests/TestSuite_link_status_interrupt.py | 122 +- tests/TestSuite_linux_modules.py | 84 +- ...Suite_loopback_multi_paths_port_restart.py | 34 +- tests/TestSuite_loopback_multi_queues.py | 28 +- ...tSuite_loopback_virtio_user_server_mode.py | 60 +- ..._loopback_virtio_user_server_mode_cbdma.py | 50 +- tests/TestSuite_mac_filter.py | 88 +- tests/TestSuite_macsec_for_ixgbe.py | 148 +-- ...Suite_malicious_driver_event_indication.py | 54 +- tests/TestSuite_mdd.py | 90 +- tests/TestSuite_metering_and_policing.py | 190 +-- tests/TestSuite_metrics.py | 93 +- tests/TestSuite_mtu_update.py | 63 +- tests/TestSuite_multicast.py | 48 +- tests/TestSuite_multiple_pthread.py | 44 +- tests/TestSuite_multiprocess.py | 188 +-- tests/TestSuite_nic_single_core_perf.py | 64 +- tests/TestSuite_ntb.py | 97 +- tests/TestSuite_nvgre.py | 190 +-- tests/TestSuite_packet_capture.py | 248 ++-- tests/TestSuite_packet_ordering.py | 50 +- tests/TestSuite_perf_virtio_user_loopback.py | 24 +- tests/TestSuite_pf_smoke.py | 82 +- tests/TestSuite_pipeline.py | 815 +++++++------ tests/TestSuite_pmd.py | 
168 +-- tests/TestSuite_pmd_bonded.py | 640 +++++----- tests/TestSuite_pmd_bonded_8023ad.py | 28 +- tests/TestSuite_pmd_stacked_bonded.py | 38 +- tests/TestSuite_pmdpcap.py | 80 +- tests/TestSuite_pmdrss_hash.py | 266 ++-- tests/TestSuite_pmdrssreta.py | 88 +- tests/TestSuite_port_control.py | 64 +- tests/TestSuite_port_representor.py | 100 +- tests/TestSuite_power_bidirection_channel.py | 82 +- tests/TestSuite_power_branch_ratio.py | 109 +- tests/TestSuite_power_empty_poll.py | 69 +- tests/TestSuite_power_negative.py | 76 +- tests/TestSuite_power_pbf.py | 56 +- tests/TestSuite_power_pstate.py | 48 +- tests/TestSuite_power_telemetry.py | 59 +- tests/TestSuite_ptpclient.py | 70 +- tests/TestSuite_ptype_mapping.py | 64 +- tests/TestSuite_pvp_diff_qemu_version.py | 92 +- .../TestSuite_pvp_multi_paths_performance.py | 66 +- ...lti_paths_vhost_single_core_performance.py | 62 +- ...ti_paths_virtio_single_core_performance.py | 64 +- ...Suite_pvp_qemu_multi_paths_port_restart.py | 74 +- tests/TestSuite_pvp_share_lib.py | 70 +- tests/TestSuite_pvp_vhost_user_reconnect.py | 148 +-- tests/TestSuite_pvp_virtio_bonding.py | 64 +- .../TestSuite_pvp_virtio_user_2M_hugepages.py | 58 +- tests/TestSuite_pvp_virtio_user_4k_pages.py | 74 +- ...p_virtio_user_multi_queues_port_restart.py | 60 +- tests/TestSuite_qinq_filter.py | 176 +-- tests/TestSuite_qos_api.py | 102 +- tests/TestSuite_qos_meter.py | 75 +- tests/TestSuite_queue_region.py | 270 ++--- tests/TestSuite_queue_start_stop.py | 74 +- tests/TestSuite_rss_key_update.py | 92 +- tests/TestSuite_rss_to_rte_flow.py | 426 +++---- tests/TestSuite_rte_flow.py | 38 +- tests/TestSuite_rteflow_priority.py | 192 +-- tests/TestSuite_runtime_vf_queue_number.py | 110 +- ...estSuite_runtime_vf_queue_number_kernel.py | 96 +- ...stSuite_runtime_vf_queue_number_maxinum.py | 58 +- tests/TestSuite_rxtx_callbacks.py | 34 +- tests/TestSuite_rxtx_offload.py | 413 ++++--- tests/TestSuite_scatter.py | 38 +- tests/TestSuite_short_live.py | 100 +- 
tests/TestSuite_shutdown_api.py | 420 +++---- tests/TestSuite_skeleton.py | 34 +- tests/TestSuite_softnic.py | 104 +- tests/TestSuite_speed_capabilities.py | 22 +- tests/TestSuite_sriov_kvm.py | 190 +-- tests/TestSuite_stats_checks.py | 60 +- tests/TestSuite_telemetry.py | 72 +- tests/TestSuite_testpmd_perf.py | 8 +- tests/TestSuite_timer.py | 14 +- tests/TestSuite_tso.py | 340 +++--- tests/TestSuite_tx_preparation.py | 81 +- tests/TestSuite_uni_pkt.py | 64 +- tests/TestSuite_unit_tests_cmdline.py | 12 +- tests/TestSuite_unit_tests_crc.py | 12 +- tests/TestSuite_unit_tests_cryptodev_func.py | 14 +- tests/TestSuite_unit_tests_dump.py | 134 +- tests/TestSuite_unit_tests_eal.py | 236 ++-- tests/TestSuite_unit_tests_event_timer.py | 26 +- tests/TestSuite_unit_tests_kni.py | 20 +- tests/TestSuite_unit_tests_loopback.py | 84 +- tests/TestSuite_unit_tests_lpm.py | 42 +- tests/TestSuite_unit_tests_mbuf.py | 12 +- tests/TestSuite_unit_tests_mempool.py | 22 +- tests/TestSuite_unit_tests_pmd_perf.py | 38 +- tests/TestSuite_unit_tests_power.py | 32 +- tests/TestSuite_unit_tests_qos.py | 32 +- tests/TestSuite_unit_tests_ring.py | 20 +- tests/TestSuite_unit_tests_ringpmd.py | 22 +- tests/TestSuite_unit_tests_timer.py | 22 +- tests/TestSuite_userspace_ethtool.py | 224 ++-- tests/TestSuite_vdev_primary_secondary.py | 66 +- tests/TestSuite_veb_switch.py | 184 +-- tests/TestSuite_vf_daemon.py | 190 +-- tests/TestSuite_vf_interrupt_pmd.py | 176 +-- tests/TestSuite_vf_jumboframe.py | 88 +- tests/TestSuite_vf_kernel.py | 598 ++++----- tests/TestSuite_vf_l3fwd.py | 92 +- tests/TestSuite_vf_l3fwd_em_kernelpf.py | 10 +- tests/TestSuite_vf_l3fwd_kernelpf.py | 10 +- tests/TestSuite_vf_l3fwd_lpm_ipv4_kernelpf.py | 10 +- ...uite_vf_l3fwd_lpm_ipv4_rfc2544_kernelpf.py | 10 +- tests/TestSuite_vf_l3fwd_lpm_ipv6_kernelpf.py | 10 +- tests/TestSuite_vf_macfilter.py | 94 +- tests/TestSuite_vf_offload.py | 220 ++-- tests/TestSuite_vf_packet_rxtx.py | 142 +-- tests/TestSuite_vf_port_start_stop.py | 64 
+- tests/TestSuite_vf_rss.py | 144 +-- tests/TestSuite_vf_single_core_perf.py | 88 +- tests/TestSuite_vf_smoke.py | 104 +- tests/TestSuite_vf_to_vf_nic_bridge.py | 86 +- tests/TestSuite_vf_vlan.py | 148 +-- tests/TestSuite_vhost_1024_ethports.py | 34 +- tests/TestSuite_vhost_cbdma.py | 100 +- tests/TestSuite_vhost_event_idx_interrupt.py | 94 +- tests/TestSuite_vhost_multi_queue_qemu.py | 190 +-- tests/TestSuite_vhost_pmd_xstats.py | 54 +- tests/TestSuite_vhost_user_interrupt.py | 56 +- tests/TestSuite_vhost_user_live_migration.py | 292 ++--- tests/TestSuite_vhost_virtio_pmd_interrupt.py | 124 +- ...tSuite_vhost_virtio_pmd_interrupt_cbdma.py | 126 +- .../TestSuite_vhost_virtio_user_interrupt.py | 116 +- ...Suite_vhost_virtio_user_interrupt_cbdma.py | 78 +- tests/TestSuite_virtio_event_idx_interrupt.py | 98 +- ...tSuite_virtio_event_idx_interrupt_cbdma.py | 108 +- .../TestSuite_virtio_ipsec_cryptodev_func.py | 168 +-- tests/TestSuite_virtio_perf_cryptodev_func.py | 96 +- tests/TestSuite_virtio_pvp_regression.py | 98 +- tests/TestSuite_virtio_smoke.py | 46 +- tests/TestSuite_virtio_unit_cryptodev_func.py | 94 +- ...stSuite_virtio_user_as_exceptional_path.py | 164 +-- ...te_virtio_user_for_container_networking.py | 58 +- tests/TestSuite_vlan.py | 116 +- tests/TestSuite_vlan_ethertype_config.py | 198 +-- tests/TestSuite_vm2vm_virtio_net_perf.py | 104 +- .../TestSuite_vm2vm_virtio_net_perf_cbdma.py | 94 +- tests/TestSuite_vm2vm_virtio_pmd.py | 306 ++--- tests/TestSuite_vm2vm_virtio_pmd_cbdma.py | 66 +- tests/TestSuite_vm2vm_virtio_user.py | 104 +- tests/TestSuite_vm2vm_virtio_user_cbdma.py | 66 +- tests/TestSuite_vm_hotplug.py | 136 +-- tests/TestSuite_vm_power_manager.py | 78 +- tests/TestSuite_vm_pw_mgmt_policy.py | 162 +-- tests/TestSuite_vmdq.py | 82 +- tests/TestSuite_vmdq_dcb.py | 54 +- ..._pvp_multi_paths_performance_with_cbdma.py | 102 +- tests/TestSuite_vswitch_sample_cbdma.py | 202 ++-- tests/TestSuite_vxlan.py | 323 +++-- 
tests/TestSuite_vxlan_gpe_support_in_i40e.py | 70 +- tests/bonding.py | 88 +- tests/compress_common.py | 32 +- tests/cryptodev_common.py | 18 +- tests/flexible_common.py | 48 +- tests/perf_test_base.py | 136 +-- tests/rte_flow_common.py | 64 +- tests/smoke_base.py | 34 +- 276 files changed, 15849 insertions(+), 15872 deletions(-) diff --git a/tests/TestSuite_acl.py b/tests/TestSuite_acl.py index e8b536f1..6c40d689 100644 --- a/tests/TestSuite_acl.py +++ b/tests/TestSuite_acl.py @@ -7,15 +7,15 @@ from framework.test_case import TestCase class TestACL(TestCase): def install_acl_rules(self): - # copy 'dep/test-acl-input.tar.gz' from tester to DUT, + # copy 'dep/test-acl-input.tar.gz' from TG to SUT, # and unpack the tarball into temporary directory. self.clean_acl_rules() - self.dut.session.copy_file_to(f"dep/{self.acl_tarball}", "/tmp") - self.dut.send_expect(f"tar xf /tmp/{self.acl_tarball} --directory=/tmp", "# ") + self.sut_node.session.copy_file_to(f"dep/{self.acl_tarball}", "/tmp") + self.sut_node.send_expect(f"tar xf /tmp/{self.acl_tarball} --directory=/tmp", "# ") def clean_acl_rules(self): # remove the temporary tarball file and directory - self.dut.send_expect( + self.sut_node.send_expect( f"rm -rf /tmp/{self.acl_tarball} {self.acl_rules_dir}", "# ", 20 ) @@ -25,9 +25,9 @@ class TestACL(TestCase): """ # build ${DPDK}//app/dpdk-test-acl self.test_acl_sh = "app/test-acl/test-acl.sh" - out = self.dut.send_expect(f"ls -l {self.test_acl_sh}", "# ") + out = self.sut_node.send_expect(f"ls -l {self.test_acl_sh}", "# ") self.logger.info(f"test_acl_sh: {self.test_acl_sh}") - self.test_acl_bin = self.dut.apps_name["test-acl"] + self.test_acl_bin = self.sut_node.apps_name["test-acl"] self.logger.info(f"test_acl_app: {self.test_acl_bin}") # prepare test-acl-input directory @@ -45,7 +45,7 @@ class TestACL(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ @@ -86,7 +86,7 @@ class TestACL(TestCase): "257", ): acl_test_cmd = f"/bin/bash -x {self.test_acl_sh} {self.test_acl_bin} {self.acl_rules_dir} {alg} {burst_size}" - out = self.dut.send_expect(acl_test_cmd, "# ", 1200, trim_whitespace=False) + out = self.sut_node.send_expect(acl_test_cmd, "# ", 1200, trim_whitespace=False) self.verify("FAILED" not in out, f"for details see TestACL.log") self.logger.info("All tests have ended successfully") diff --git a/tests/TestSuite_af_xdp_2.py b/tests/TestSuite_af_xdp_2.py index 8e2fa9c3..38ee4836 100644 --- a/tests/TestSuite_af_xdp_2.py +++ b/tests/TestSuite_af_xdp_2.py @@ -6,10 +6,10 @@ import os import re import time -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestAfXdp(TestCase): @@ -20,10 +20,10 @@ class TestAfXdp(TestCase): # self.verify(self.nic in ("I40E_40G-QSFP_A"), "the port can not run this suite") self.frame_sizes = [64, 128, 256, 512, 1024, 1518] - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.header_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["udp"] @@ -36,29 +36,29 @@ class TestAfXdp(TestCase): self.frame_sizes = self.get_suite_cfg()["packet_sizes"] self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - 
self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.pktgen_helper = PacketGeneratorHelper() + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.pktgen_helper = TrafficGeneratorStream() - self.dut.restore_interfaces() - self.irqs_set = self.dut.new_session(suite="irqs-set") + self.sut_node.restore_interfaces() + self.irqs_set = self.sut_node.new_session(suite="irqs-set") def set_up(self): pass def set_port_queue(self, intf): - self.dut.send_expect( + self.sut_node.send_expect( "ethtool -L %s combined %d" % (intf, self.nb_cores / self.port_num), "# " ) def config_stream(self, rx_port, frame_size): tgen_input = [] - dst_mac = self.dut.get_mac_address(self.dut_ports[rx_port]) - pkt = Packet(pkt_len=frame_size) - pkt.config_layers( + dst_mac = self.sut_node.get_mac_address(self.sut_ports[rx_port]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_len=frame_size) + scapy_pkt_builder.config_layers( [ ("ether", {"dst": dst_mac}), ("ipv4", {"dst": "192.168.%d.1" % (rx_port + 1), "proto": 255}), @@ -67,7 +67,7 @@ class TestAfXdp(TestCase): pcap = os.path.join( self.out_path, "af_xdp_%d_%d_%d.pcap" % (self.port_num, rx_port, frame_size) ) - pkt.save_pcapfile(None, pcap) + scapy_pkt_builder.save_pcapfile(None, pcap) tgen_input.append((rx_port, rx_port, pcap)) return tgen_input @@ -76,10 +76,10 @@ class TestAfXdp(TestCase): tgen_input = [] rule = self.rule[rule_index] - pkt = Packet(pkt_len=frame_size) - pkt.config_layers([("udp", {"src": rule[-2], "dst": rule[-1]})]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_len=frame_size) + scapy_pkt_builder.config_layers([("udp", {"src": rule[-2], "dst": rule[-1]})]) pcap = os.path.join(self.out_path, "af_xdp_%d_%d.pcap" % (rule[-2], frame_size)) - pkt.save_pcapfile(None, pcap) + scapy_pkt_builder.save_pcapfile(None, pcap) tgen_input.append((rule[0], rule[0], pcap)) return tgen_input @@ 
-87,7 +87,7 @@ class TestAfXdp(TestCase): def ethtool_set_rule(self): rule_id, rule = 1, [] for i in range(self.port_num): - intf = self.dut.ports_info[i]["port"].get_interface_name() + intf = self.sut_node.ports_info[i]["port"].get_interface_name() self.irqs_set.send_expect("ethtool -N %s rx-flow-hash udp4 fn" % intf, "# ") self.irqs_set.send_expect( "ethtool -N %s flow-type udp4 src-port 4243 dst-port 4243 action 0 loc %d" @@ -115,7 +115,7 @@ class TestAfXdp(TestCase): core_config = "1S/%dC/1T" % ( self.nb_cores + 1 + max(self.port_num, self.vdev_num) * self.queue_number ) - self.core_list = self.dut.get_core_list(core_config, socket=self.ports_socket) + self.core_list = self.sut_node.get_core_list(core_config, socket=self.ports_socket) def assign_port_core(self, separate=True): if separate: @@ -128,7 +128,7 @@ class TestAfXdp(TestCase): ][-max(self.port_num, self.vdev_num) * self.queue_number :] for i in range(self.port_num): - intf = self.dut.ports_info[i]["port"].get_interface_name() + intf = self.sut_node.ports_info[i]["port"].get_interface_name() cores = ",".join( core_list[i * self.queue_number : (i + 1) * self.queue_number] ) @@ -138,7 +138,7 @@ class TestAfXdp(TestCase): out = self.irqs_set.send_expect(command, "# ") self.verify( "No such file or directory" not in out, - "can not find the set_irq_affinity in dut root", + "can not find the set_irq_affinity in SUT root", ) time.sleep(1) @@ -146,7 +146,7 @@ class TestAfXdp(TestCase): vdev_list = [] if self.port_num == 1: - intf = self.dut.ports_info[0]["port"].get_interface_name() + intf = self.sut_node.ports_info[0]["port"].get_interface_name() self.set_port_queue(intf) time.sleep(1) for i in range(self.vdev_num): @@ -161,7 +161,7 @@ class TestAfXdp(TestCase): else: for i in range(self.port_num): vdev = "" - intf = self.dut.ports_info[i]["port"].get_interface_name() + intf = self.sut_node.ports_info[i]["port"].get_interface_name() self.set_port_queue(intf) vdev = "net_af_xdp%d,iface=%s" % (i, intf) 
vdev_list.append(vdev) @@ -182,14 +182,14 @@ class TestAfXdp(TestCase): else: rss_ip = "" - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list[ : -max(self.port_num, self.vdev_num) * self.queue_number ], vdevs=vdev, no_pci=True, ) - app_name = self.dut.apps_name["test-pmd"] + app_name = self.sut_node.apps_name["test-pmd"] command = ( app_name + " %s --log-level=pmd.net.af_xdp:8 -- -i %s %s --auto-start --nb-cores=%d --rxq=%d " @@ -206,7 +206,7 @@ class TestAfXdp(TestCase): ) self.logger.info("start testpmd") - self.dut.send_expect(command, "testpmd> ", 120) + self.sut_node.send_expect(command, "testpmd> ", 120) def create_table(self, index=1): if self.port_num == 2 or index == 2: @@ -239,18 +239,18 @@ class TestAfXdp(TestCase): traffic_opt = {"delay": 5} # clear streams before add new streams - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() - # run packet generator + # run traffic generator fields_config = { "ip": { "dst": {"action": "random"}, }, } streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, fields_config, self.tester.pktgen + tgen_input, 100, fields_config, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) @@ -333,7 +333,7 @@ class TestAfXdp(TestCase): result.append(Mpps) result.append(throughput) - self.out = self.dut.send_expect("stop", "testpmd> ", 60) + self.out = self.sut_node.send_expect("stop", "testpmd> ", 60) if self.queue_number == 1: self.check_packets_of_each_port(i) @@ -342,7 +342,7 @@ class TestAfXdp(TestCase): else: self.check_packets_of_each_queue(i) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) self.update_table_info(result) @@ -468,7 +468,7 @@ class TestAfXdp(TestCase): self.result_table_print() def tear_down(self): - self.dut.send_expect("quit", 
"#", 60) + self.sut_node.send_expect("quit", "#", 60) def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_asan_smoke.py b/tests/TestSuite_asan_smoke.py index 14e18edd..47fbbbf9 100644 --- a/tests/TestSuite_asan_smoke.py +++ b/tests/TestSuite_asan_smoke.py @@ -2,8 +2,8 @@ # Copyright(c) 2022 Intel Corporation # -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from .smoke_base import SmokeTest @@ -15,26 +15,26 @@ class TestASanSmoke(TestCase): Run at the start of each test suite. Generic filter Prerequistites """ - self.smoke_dut_ports = self.dut.get_ports(self.nic) - self.ports_pci = [self.dut.ports_info[self.smoke_dut_ports[0]]["pci"]] + self.smoke_sut_ports = self.sut_node.get_ports(self.nic) + self.ports_pci = [self.sut_node.ports_info[self.smoke_sut_ports[0]]["pci"]] # Verify that enough ports are available - self.verify(len(self.smoke_dut_ports) >= 1, "Insufficient ports") - self.tester_port0 = self.tester.get_local_port(self.smoke_dut_ports[0]) - self.smoke_tester_nic = self.tester.get_interface(self.tester_port0) - self.smoke_tester_mac = self.tester.get_mac(self.smoke_dut_ports[0]) - self.smoke_dut_mac = self.dut.get_mac_address(self.smoke_dut_ports[0]) + self.verify(len(self.smoke_sut_ports) >= 1, "Insufficient ports") + self.tg_port0 = self.tg_node.get_local_port(self.smoke_sut_ports[0]) + self.smoke_tg_nic = self.tg_node.get_interface(self.tg_port0) + self.smoke_tg_mac = self.tg_node.get_mac(self.smoke_sut_ports[0]) + self.smoke_sut_mac = self.sut_node.get_mac_address(self.smoke_sut_ports[0]) self.cores = "1S/5C/1T" # check core num - core_list = self.dut.get_core_list(self.cores) + core_list = self.sut_node.get_core_list(self.cores) self.verify(len(core_list) >= 5, "Insufficient cores for testing") - # init Packet(), SmokeTest(), PmdOutput() - self.pkt = Packet() + # init 
ScapyTrafficGenerator(), SmokeTest(), PmdOutput() + self.scapy_pkt_builder = ScapyPacketBuilder() self.smoke_base = SmokeTest(self) - self.pmd_out = PmdOutput(self.dut) + self.pmd_out = PmdOutput(self.sut_node) # build dpdk with ASan tool - self.dut.build_install_dpdk( + self.sut_node.build_install_dpdk( target=self.target, extra_options="-Dbuildtype=debug -Db_lundef=false -Db_sanitize=address", ) @@ -55,13 +55,13 @@ class TestASanSmoke(TestCase): """ Run after each test suite. """ - self.dut.send_expect("quit", "#") - self.dut.kill_all() - self.dut.build_install_dpdk(self.target) + self.sut_node.send_expect("quit", "#") + self.sut_node.kill_all() + self.sut_node.build_install_dpdk(self.target) def check_testpmd_status(self): cmd = "ps -aux | grep testpmd | grep -v grep" - out = self.dut.send_expect(cmd, "#", 15, alt_session=True) + out = self.sut_node.send_expect(cmd, "#", 15, alt_session=True) self.verify( "testpmd" in out, "After build dpdk with ASan, start testpmd failed" ) diff --git a/tests/TestSuite_basic_4k_pages_cbdma.py b/tests/TestSuite_basic_4k_pages_cbdma.py index 45e78f1e..74e48570 100644 --- a/tests/TestSuite_basic_4k_pages_cbdma.py +++ b/tests/TestSuite_basic_4k_pages_cbdma.py @@ -11,10 +11,10 @@ import re import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM @@ -23,46 +23,46 @@ class TestBasic4kPagesCbdma(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores_num = len([n for n in self.dut.cores if int(n["socket"]) == 0]) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores_num = len([n for n in self.sut_node.cores if int(n["socket"]) == 0]) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.verify( self.cores_num >= 4, "There has not enought cores to test this suite %s" % self.suite_name, ) - self.cores_list = self.dut.get_core_list(config="all", socket=self.ports_socket) + self.cores_list = self.sut_node.get_core_list(config="all", socket=self.ports_socket) self.vhost_core_list = self.cores_list[0:9] self.virtio0_core_list = self.cores_list[9:11] - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user0 = self.dut.new_session(suite="virtio-user") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) - self.pci_info = self.dut.ports_info[0]["pci"] - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user0 = self.sut_node.new_session(suite="virtio-user") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user0_pmd = PmdOutput(self.sut_node, self.virtio_user0) + self.pci_info = self.sut_node.ports_info[0]["pci"] + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.frame_sizes = [64, 128, 256, 512, 1024, 1518] self.out_path = "/tmp/%s" % self.suite_name - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + 
self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() self.number_of_ports = 1 - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] self.vm_num = 2 self.virtio_ip1 = "1.1.1.1" self.virtio_ip2 = "1.1.1.2" self.virtio_mac1 = "52:54:00:00:00:01" self.virtio_mac2 = "52:54:00:00:00:02" - self.base_dir = self.dut.base_dir.replace("~", "/root") + self.base_dir = self.sut_node.base_dir.replace("~", "/root") def set_up(self): """ Run before each test case. """ - self.dut.send_expect("rm -rf /tmp/vhost-net*", "# ") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "# ") + self.sut_node.send_expect("rm -rf /tmp/vhost-net*", "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "# ") self.umount_tmpfs_for_4k() # Prepare the result table self.table_header = ["Frame"] @@ -71,7 +71,7 @@ class TestBasic4kPagesCbdma(TestCase): self.table_header.append("Queue Num") self.table_header.append("% linerate") self.result_table_create(self.table_header) - self.vm_dut = [] + self.vm_sut = [] self.vm = [] def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False): @@ -81,7 +81,7 @@ class TestBasic4kPagesCbdma(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -105,7 +105,7 @@ class TestBasic4kPagesCbdma(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -113,11 +113,11 @@ class 
TestBasic4kPagesCbdma(TestCase): ) def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -125,22 +125,22 @@ class TestBasic4kPagesCbdma(TestCase): def send_and_verify(self): """ - Send packet with packet generator and verify + Send packet with traffic generator and verify """ for frame_size in self.frame_sizes: tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - pkt = Packet(pkt_type="UDP", pkt_len=frame_size) - pkt.config_layer("ether", {"dst": "%s" % self.dst_mac}) - pkt.save_pcapfile(self.tester, "%s/vhost.pcap" % self.out_path) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=frame_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % self.dst_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/vhost.pcap" % self.out_path) tgen_input.append((tx_port, rx_port, "%s/vhost.pcap" % self.out_path)) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) Mpps = pps / 1000000.0 # self.verify(Mpps > self.check_value[frame_size], # "%s of frame size %d speed verify failed, expect %s, result %s" % ( @@ -194,8 +194,8 @@ class TestBasic4kPagesCbdma(TestCase): vm_params["opt_queue"] = opt_queue for i in 
range(self.vm_num): - vm_dut = None - vm_info = VM(self.dut, "vm%d" % i, vm_config) + vm_sut = None + vm_info = VM(self.sut_node, "vm%d" % i, vm_config) vm_params["driver"] = "vhost-user" if not server_mode: @@ -207,31 +207,31 @@ class TestBasic4kPagesCbdma(TestCase): vm_info.set_vm_device(**vm_params) time.sleep(3) try: - vm_dut = vm_info.start(set_target=False) - if vm_dut is None: + vm_sut = vm_info.start(set_target=False) + if vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print((utils.RED("Failure for %s" % str(e)))) raise e - self.vm_dut.append(vm_dut) + self.vm_sut.append(vm_sut) self.vm.append(vm_info) def config_vm_ip(self): """ set virtio device IP and run arp protocal """ - vm1_intf = self.vm_dut[0].ports_info[0]["intf"] - vm2_intf = self.vm_dut[1].ports_info[0]["intf"] - self.vm_dut[0].send_expect( + vm1_intf = self.vm_sut[0].ports_info[0]["intf"] + vm2_intf = self.vm_sut[1].ports_info[0]["intf"] + self.vm_sut[0].send_expect( "ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "ifconfig %s %s" % (vm2_intf, self.virtio_ip2), "#", 10 ) - self.vm_dut[0].send_expect( + self.vm_sut[0].send_expect( "arp -s %s %s" % (self.virtio_ip2, self.virtio_mac2), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "arp -s %s %s" % (self.virtio_ip1, self.virtio_mac1), "#", 10 ) @@ -239,12 +239,12 @@ class TestBasic4kPagesCbdma(TestCase): """ set virtio device combined """ - vm1_intf = self.vm_dut[0].ports_info[0]["intf"] - vm2_intf = self.vm_dut[1].ports_info[0]["intf"] - self.vm_dut[0].send_expect( + vm1_intf = self.vm_sut[0].ports_info[0]["intf"] + vm2_intf = self.vm_sut[1].ports_info[0]["intf"] + self.vm_sut[0].send_expect( "ethtool -L %s combined %d" % (vm1_intf, combined), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "ethtool -L %s combined %d" % (vm2_intf, combined), "#", 10 ) @@ -254,10 +254,10 @@ class 
TestBasic4kPagesCbdma(TestCase): """ iperf_server = "iperf -s -i 1" iperf_client = "iperf -c {} -i 1 -t 60".format(self.virtio_ip1) - self.vm_dut[0].send_expect( + self.vm_sut[0].send_expect( "{} > iperf_server.log &".format(iperf_server), "", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "{} > iperf_client.log &".format(iperf_client), "", 60 ) time.sleep(60) @@ -268,8 +268,8 @@ class TestBasic4kPagesCbdma(TestCase): """ self.table_header = ["Mode", "[M|G]bits/sec"] self.result_table_create(self.table_header) - self.vm_dut[0].send_expect("pkill iperf", "# ") - self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir) + self.vm_sut[0].send_expect("pkill iperf", "# ") + self.vm_sut[1].session.copy_file_from("%s/iperf_client.log" % self.sut_node.base_dir) fp = open("./iperf_client.log") fmsg = fp.read() fp.close() @@ -289,8 +289,8 @@ class TestBasic4kPagesCbdma(TestCase): # print iperf resut self.result_table_print() # rm the iperf log file in vm - self.vm_dut[0].send_expect("rm iperf_server.log", "#", 10) - self.vm_dut[1].send_expect("rm iperf_client.log", "#", 10) + self.vm_sut[0].send_expect("rm iperf_server.log", "#", 10) + self.vm_sut[1].send_expect("rm iperf_client.log", "#", 10) def verify_xstats_info_on_vhost(self): """ @@ -315,8 +315,8 @@ class TestBasic4kPagesCbdma(TestCase): Prepare tmpfs with 4K-pages """ for num in range(number): - self.dut.send_expect("mkdir /mnt/tmpfs_nohuge{}".format(num), "# ") - self.dut.send_expect( + self.sut_node.send_expect("mkdir /mnt/tmpfs_nohuge{}".format(num), "# ") + self.sut_node.send_expect( "mount tmpfs /mnt/tmpfs_nohuge{} -t tmpfs -o size=4G".format(num), "# " ) @@ -324,21 +324,21 @@ class TestBasic4kPagesCbdma(TestCase): """ Prepare tmpfs with 4K-pages """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "mount |grep 'mnt/tmpfs' |awk -F ' ' {'print $3'}", "#" ) mount_infos = out.replace("\r", "").split("\n") if len(mount_infos) != 0: for mount_info in 
mount_infos: - self.dut.send_expect("umount {}".format(mount_info), "# ") + self.sut_node.send_expect("umount {}".format(mount_info), "# ") def umount_huge_pages(self): - self.dut.send_expect("mount |grep '/mnt/huge' |awk -F ' ' {'print $3'}", "#") - self.dut.send_expect("umount /mnt/huge", "# ") + self.sut_node.send_expect("mount |grep '/mnt/huge' |awk -F ' ' {'print $3'}", "#") + self.sut_node.send_expect("umount /mnt/huge", "# ") def mount_huge_pages(self): - self.dut.send_expect("mkdir -p /mnt/huge", "# ") - self.dut.send_expect("mount -t hugetlbfs nodev /mnt/huge", "# ") + self.sut_node.send_expect("mkdir -p /mnt/huge", "# ") + self.sut_node.send_expect("mount -t hugetlbfs nodev /mnt/huge", "# ") def test_perf_pvp_virtio_user_split_ring_with_4K_pages_and_cbdma_enable(self): """ @@ -350,7 +350,7 @@ class TestBasic4kPagesCbdma(TestCase): vhost_param = " --no-numa --socket-num={} --lcore-dma=[{}]".format( self.ports_socket, lcore_dma ) - ports = [self.dut.ports_info[0]["pci"]] + ports = [self.sut_node.ports_info[0]["pci"]] for i in self.cbdma_list: ports.append(i) self.start_vhost_user_testpmd( @@ -380,7 +380,7 @@ class TestBasic4kPagesCbdma(TestCase): vhost_param = " --no-numa --socket-num={} --lcore-dma=[{}]".format( self.ports_socket, lcore_dma ) - ports = [self.dut.ports_info[0]["pci"]] + ports = [self.sut_node.ports_info[0]["pci"]] for i in self.cbdma_list: ports.append(i) self.start_vhost_user_testpmd( @@ -406,7 +406,7 @@ class TestBasic4kPagesCbdma(TestCase): """ self.virtio_user0_pmd.quit() self.vhost_user_pmd.quit() - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "# ") self.bind_cbdma_device_to_kernel() self.umount_tmpfs_for_4k() @@ -414,5 +414,5 @@ class TestBasic4kPagesCbdma(TestCase): """ Run after each test suite. 
""" - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user0) diff --git a/tests/TestSuite_blocklist.py b/tests/TestSuite_blocklist.py index 635502ff..71c5bdad 100644 --- a/tests/TestSuite_blocklist.py +++ b/tests/TestSuite_blocklist.py @@ -20,13 +20,13 @@ class TestBlockList(TestCase): Requirements: Two Ports """ - self.ports = self.dut.get_ports(self.nic) + self.ports = self.sut_node.get_ports(self.nic) self.verify(len(self.ports) >= 2, "Insufficient ports for testing") [arch, machine, self.env, toolchain] = self.target.split("-") self.regexp_blocklisted_port = ( "Probe PCI driver: net.*%s \(%s\) device: .*%s \(socket [-0-9]+\)" ) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) def set_up(self): """ @@ -46,10 +46,10 @@ class TestBlockList(TestCase): # Look for the PCI ID of each card followed by # "Device is blocklisted, not initializing" but avoid to consume more # than one device. - port_pci = self.dut.ports_info[port]["pci"] + port_pci = self.sut_node.ports_info[port]["pci"] regexp_blocklisted_port = self.regexp_blocklisted_port % ( DRIVERS.get(self.nic), - self.dut.ports_info[port]["type"], + self.sut_node.ports_info[port]["type"], port_pci, ) matching_ports = utils.regexp(output, regexp_blocklisted_port, True) @@ -77,9 +77,9 @@ class TestBlockList(TestCase): """ Run testpmd with one port blocklisted. """ - self.dut.kill_all() + self.sut_node.kill_all() out = self.pmdout.start_testpmd( - "Default", eal_param="-b %s" % self.dut.ports_info[0]["pci"] + "Default", eal_param="-b %s" % self.sut_node.ports_info[0]["pci"] ) self.check_blocklisted_ports(out, self.ports[1:]) @@ -87,11 +87,11 @@ class TestBlockList(TestCase): """ Run testpmd with all but one port blocklisted. 
""" - self.dut.kill_all() + self.sut_node.kill_all() ports_to_blocklist = self.ports[:-1] cmdline = "" for port in ports_to_blocklist: - cmdline += " -b %s" % self.dut.ports_info[port]["pci"] + cmdline += " -b %s" % self.sut_node.ports_info[port]["pci"] out = self.pmdout.start_testpmd("Default", eal_param=cmdline) blocklisted_ports = self.check_blocklisted_ports(out, ports_to_blocklist, True) @@ -100,7 +100,7 @@ class TestBlockList(TestCase): Run after each test case. Quit testpmd. """ - self.dut.send_expect("quit", "# ", 10) + self.sut_node.send_expect("quit", "# ", 10) def tear_down_all(self): """ diff --git a/tests/TestSuite_cbdma.py b/tests/TestSuite_cbdma.py index d5198472..79541122 100644 --- a/tests/TestSuite_cbdma.py +++ b/tests/TestSuite_cbdma.py @@ -10,9 +10,9 @@ import re import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import TRANSMIT_CONT from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.settings import TRANSMIT_CONT from framework.test_case import TestCase @@ -24,16 +24,16 @@ class TestCBDMA(TestCase): self.frame_sizes = [64, 256, 512, 1024, 1518] self.cbdma_dev_infos = [] self.device_str = None - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.get_cbdma_ports_info_and_bind_to_dpdk() # default --proc-type=primary, case 1-6 use default values, case7 use --proc-type=secondary self.cbdma_proc = "--proc-type=primary" # default v_dev is None, case 1-6 use default None values, case7 use --vdev net_null_0 self.v_dev = "" - out = self.dut.build_dpdk_apps("./examples/dma") - self.dma_path = self.dut.apps_name["dma"] + out = 
self.sut_node.build_dpdk_apps("./examples/dma") + self.dma_path = self.sut_node.apps_name["dma"] self.verify("Error" not in out, "compilation dma error") def set_up(self): @@ -49,14 +49,14 @@ class TestCBDMA(TestCase): self.table_header.append("Updating MAC") self.table_header.append("% linerate") self.result_table_create(self.table_header) - self.send_session = self.dut.new_session("new_session") + self.send_session = self.sut_node.new_session("new_session") def get_core_list(self): """ get cores list depend on thread_num """ core_config = "1S/%dC/1T" % self.cbdma_cores_num - self.core_list = self.dut.get_core_list(core_config, socket=self.ports_socket) + self.core_list = self.sut_node.get_core_list(core_config, socket=self.ports_socket) self.verify( len(self.core_list) >= self.cbdma_cores_num, "There no enough cores to run this case", @@ -66,7 +66,7 @@ class TestCBDMA(TestCase): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -88,7 +88,7 @@ class TestCBDMA(TestCase): "There no enough cbdma device to run this suite", ) self.device_str = " ".join(self.cbdma_dev_infos[0:8]) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -97,11 +97,11 @@ class TestCBDMA(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -111,7 +111,7 @@ class TestCBDMA(TestCase): def get_ports_info(self): dev_info = [] for i in range(self.cbdma_nic_dev_num): - 
dev_info.append(self.dut.ports_info[i]["pci"]) + dev_info.append(self.sut_node.ports_info[i]["pci"]) for i in range(self.cbdma_dma_dev_num): dev_info.append(self.cbdma_dev_infos[i]) return dev_info @@ -170,23 +170,23 @@ class TestCBDMA(TestCase): def config_stream(self, frame_size): stream_ids = [] for port in range(self.cbdma_nic_dev_num): - tx_port = self.tester.get_local_port(self.dut_ports[port]) + tx_port = self.tg_node.get_local_port(self.sut_ports[port]) rx_port = tx_port if self.cbdma_nic_dev_num > 1: if port % self.cbdma_nic_dev_num == 0: - rx_port = self.tester.get_local_port(self.dut_ports[port + 1]) + rx_port = self.tg_node.get_local_port(self.sut_ports[port + 1]) else: - rx_port = self.tester.get_local_port(self.dut_ports[port - 1]) - dst_mac = self.dut.get_mac_address(self.dut_ports[port]) + rx_port = self.tg_node.get_local_port(self.sut_ports[port - 1]) + dst_mac = self.sut_node.get_mac_address(self.sut_ports[port]) # pkt config - pkt = Packet(pkt_type="UDP", pkt_len=frame_size) - pkt.config_layer("ether", {"dst": "%s" % dst_mac}) - pkt.config_layer("udp", {"src": 1111, "dst": 1112}) - pkt.save_pcapfile( - self.tester, "%s/cbdma_%d.pcap" % (self.tester.tmp_file, port) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=frame_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % dst_mac}) + scapy_pkt_builder.config_layer("udp", {"src": 1111, "dst": 1112}) + scapy_pkt_builder.save_pcapfile( + self.tg_node, "%s/cbdma_%d.pcap" % (self.tg_node.tmp_file, port) ) stream_option = { - "pcap": "%s/cbdma_%d.pcap" % (self.tester.tmp_file, port), + "pcap": "%s/cbdma_%d.pcap" % (self.tg_node.tmp_file, port), "fields_config": { "ip": { "src": { @@ -202,22 +202,22 @@ class TestCBDMA(TestCase): "transmit_mode": TRANSMIT_CONT, }, } - stream_id = self.tester.pktgen.add_stream( - tx_port, tx_port, "%s/cbdma_%d.pcap" % (self.tester.tmp_file, port) + stream_id = self.tg_node.perf_tg.add_stream( + tx_port, tx_port, "%s/cbdma_%d.pcap" % 
(self.tg_node.tmp_file, port) ) - self.tester.pktgen.config_stream(stream_id, stream_option) + self.tg_node.perf_tg.config_stream(stream_id, stream_option) stream_ids.append(stream_id) return stream_ids def send_and_verify_throughput(self, check_channel=False): """ - Send packet with packet generator and verify + Send packet with traffic generator and verify """ for frame_size in self.frame_sizes: - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() stream_ids = self.config_stream(frame_size) traffic_opt = {"method": "throughput", "rate": 100, "duration": 20} - _, pps = self.tester.pktgen.measure(stream_ids, traffic_opt) + _, pps = self.tg_node.perf_tg.measure(stream_ids, traffic_opt) self.verify( pps > 0, "%s can not receive packets of frame size %d" @@ -275,7 +275,7 @@ class TestCBDMA(TestCase): self.cbdma_copy_mode = "hw" self.get_core_list() dev_info = self.get_ports_info() - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, ports=dev_info, prefix="cbdma" ) self.launch_dma_app(eal_params) @@ -294,7 +294,7 @@ class TestCBDMA(TestCase): self.cbdma_copy_mode = "hw" self.get_core_list() dev_info = self.get_ports_info() - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, ports=dev_info, prefix="cbdma" ) self.launch_dma_app(eal_params) @@ -313,7 +313,7 @@ class TestCBDMA(TestCase): self.cbdma_copy_mode = "hw" self.get_core_list() dev_info = self.get_ports_info() - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, ports=dev_info, prefix="cbdma" ) self.launch_dma_app(eal_params) @@ -334,7 +334,7 @@ class TestCBDMA(TestCase): for queue_num in queue_num_list: self.cbdma_dma_dev_num = queue_num dev_info = self.get_ports_info() - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, 
ports=dev_info, prefix="cbdma" ) self.launch_dma_app(eal_params) @@ -354,7 +354,7 @@ class TestCBDMA(TestCase): self.cbdma_copy_mode = "hw" self.get_core_list() dev_info = self.get_ports_info() - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, ports=dev_info, prefix="cbdma" ) self.launch_dma_app(eal_params) @@ -377,7 +377,7 @@ class TestCBDMA(TestCase): self.cbdma_copy_mode = "hw" self.get_core_list() dev_info = self.get_ports_info() - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, ports=dev_info, prefix="cbdma" ) self.launch_dma_app(eal_params) @@ -402,13 +402,13 @@ class TestCBDMA(TestCase): dev_info = self.get_ports_info() dev_info.pop(0) self.get_core_list() - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.pmdout.start_testpmd( cores="", eal_param="--vdev net_null_0 --proc-type=primary", ports=dev_info ) self.pmdout.execute_cmd("port stop all") self.cbdma_proc = "--proc-type=secondary" - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, ports=dev_info ) self.launch_dma_app(eal_params) @@ -421,8 +421,8 @@ class TestCBDMA(TestCase): Run after each test case. 
""" self.send_session.send_expect("^c", "# ") - self.dut.close_session(self.send_session) - self.dut.kill_all() + self.sut_node.close_session(self.send_session) + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_checksum_offload.py b/tests/TestSuite_checksum_offload.py index 3554f5eb..020b4b2a 100644 --- a/tests/TestSuite_checksum_offload.py +++ b/tests/TestSuite_checksum_offload.py @@ -24,15 +24,15 @@ from scapy.layers.vxlan import VXLAN from scapy.packet import Raw from scapy.utils import rdpcap, wrpcap -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils from framework.exception import VerifyFailure -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.rst import RstReport from framework.settings import FOLDERS from framework.test_capabilities import DRIVER_TEST_LACK_CAPA from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream l3_proto_classes = [IP, IPv6] @@ -62,12 +62,12 @@ class TestChecksumOffload(TestCase): Checksum offload prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.pmdout: PmdOutput = PmdOutput(self.dut) - self.portMask = utils.create_mask([self.dut_ports[0]]) - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.pmdout: PmdOutput = PmdOutput(self.sut_node) + self.portMask = utils.create_mask([self.sut_ports[0]]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) # get dts output path if self.logger.log_path.startswith(os.sep): self.output_path = self.logger.log_path @@ -88,28 +88,28 @@ class TestChecksumOffload(TestCase): + "--port-topology=loop", socket=self.ports_socket, ) - self.dut.send_expect("set verbose 1", "testpmd>") - self.dut.send_expect("set fwd csum", "testpmd>") + self.sut_node.send_expect("set verbose 1", "testpmd>") + self.sut_node.send_expect("set fwd csum", "testpmd>") def checksum_enablehw(self, port): - self.dut.send_expect("port stop all", "testpmd>") - self.dut.send_expect("rx_vxlan_port add 4789 0 ", "testpmd>") - self.dut.send_expect("csum set ip hw %d" % port, "testpmd>") - self.dut.send_expect("csum set udp hw %d" % port, "testpmd>") - self.dut.send_expect("csum set tcp hw %d" % port, "testpmd>") - self.dut.send_expect("csum set sctp hw %d" % port, "testpmd>") - self.dut.send_expect("csum set outer-ip hw %d" % port, "testpmd>") - self.dut.send_expect("csum set outer-udp hw %d" % port, "testpmd>") - self.dut.send_expect("csum parse-tunnel on %d" % port, "testpmd>") - self.dut.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("port stop all", "testpmd>") + self.sut_node.send_expect("rx_vxlan_port add 4789 0 ", "testpmd>") + self.sut_node.send_expect("csum set ip hw %d" % port, "testpmd>") + 
self.sut_node.send_expect("csum set udp hw %d" % port, "testpmd>") + self.sut_node.send_expect("csum set tcp hw %d" % port, "testpmd>") + self.sut_node.send_expect("csum set sctp hw %d" % port, "testpmd>") + self.sut_node.send_expect("csum set outer-ip hw %d" % port, "testpmd>") + self.sut_node.send_expect("csum set outer-udp hw %d" % port, "testpmd>") + self.sut_node.send_expect("csum parse-tunnel on %d" % port, "testpmd>") + self.sut_node.send_expect("port start all", "testpmd>") def checksum_enablesw(self, port): - self.dut.send_expect("port stop all", "testpmd>") - self.dut.send_expect("csum set ip sw %d" % port, "testpmd>") - self.dut.send_expect("csum set udp sw %d" % port, "testpmd>") - self.dut.send_expect("csum set tcp sw %d" % port, "testpmd>") - self.dut.send_expect("csum set sctp sw %d" % port, "testpmd>") - self.dut.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("port stop all", "testpmd>") + self.sut_node.send_expect("csum set ip sw %d" % port, "testpmd>") + self.sut_node.send_expect("csum set udp sw %d" % port, "testpmd>") + self.sut_node.send_expect("csum set tcp sw %d" % port, "testpmd>") + self.sut_node.send_expect("csum set sctp sw %d" % port, "testpmd>") + self.sut_node.send_expect("port start all", "testpmd>") def get_chksum_values(self, packets_expected): """ @@ -119,15 +119,15 @@ class TestChecksumOffload(TestCase): chksum = dict() - self.tester.send_expect("scapy", ">>> ") + self.tg_node.send_expect("scapy", ">>> ") for packet_type in list(packets_expected.keys()): - self.tester.send_expect("p = %s" % packets_expected[packet_type], ">>>") - out = self.tester.send_command("p.show2()", timeout=1) + self.tg_node.send_expect("p = %s" % packets_expected[packet_type], ">>>") + out = self.tg_node.send_command("p.show2()", timeout=1) chksums = checksum_pattern.findall(out) chksum[packet_type] = chksums - self.tester.send_expect("exit()", "#") + self.tg_node.send_expect("exit()", "#") return chksum @@ -135,14 +135,14 @@ class 
TestChecksumOffload(TestCase): """ Sends packets and check the checksum valid-flags. """ - self.dut.send_expect("start", "testpmd>") - tx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.sut_node.send_expect("start", "testpmd>") + tx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) for packet_type in list(packets_sent.keys()): - self.pkt = packet.Packet(pkt_str=packets_sent[packet_type]) - self.pkt.send_pkt(self.tester, tx_interface, count=4) - out = self.dut.get_session_output(timeout=1) + self.scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder(pkt_str=packets_sent[packet_type]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_interface, count=4) + out = self.sut_node.get_session_output(timeout=1) lines = out.split("\r\n") # collect the checksum result @@ -191,36 +191,36 @@ class TestChecksumOffload(TestCase): "Packet Rx IP checksum valid-flags error!", ) - self.dut.send_expect("stop", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") def checksum_validate(self, packets_sent, packets_expected): """ Validate the checksum. 
""" - tx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + tx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - rx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + rx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - sniff_src = self.dut.get_mac_address(self.dut_ports[0]) + sniff_src = self.sut_node.get_mac_address(self.sut_ports[0]) result = dict() chksum = self.get_chksum_values(packets_expected) - inst = self.tester.tcpdump_sniff_packets( + inst = self.tg_node.tcpdump_sniff_packets( intf=rx_interface, count=len(packets_sent) * 4, filters=[{"layer": "ether", "config": {"src": sniff_src}}], ) - self.pkt = packet.Packet() + self.scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() for packet_type in list(packets_sent.keys()): - self.pkt.append_pkt(packets_sent[packet_type]) - self.pkt.send_pkt(crb=self.tester, tx_port=tx_interface, count=4) + self.scapy_pkt_builder.append_pkt(packets_sent[packet_type]) + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_interface, count=4) - p = self.tester.load_tcpdump_sniff_packets(inst) + p = self.tg_node.load_tcpdump_sniff_packets(inst) nr_packets = len(p) print(p) packets_received = [ @@ -264,11 +264,11 @@ class TestChecksumOffload(TestCase): return result def send_scapy_packet(self, packet: str): - itf = self.tester.get_interface(self.tester.get_local_port(self.dut_ports[0])) + itf = self.tg_node.get_interface(self.tg_node.get_local_port(self.sut_ports[0])) - self.tester.scapy_foreground() - self.tester.scapy_append(f'sendp({packet}, iface="{itf}")') - return self.tester.scapy_execute() + self.tg_node.scapy_foreground() + self.tg_node.scapy_append(f'sendp({packet}, iface="{itf}")') + return self.tg_node.scapy_execute() def get_pkt_rx_l4_cksum(self, testpmd_output: str) -> bool: return self.checksum_flags_are_good("RTE_MBUF_F_RX_L4_CKSUM_", 
testpmd_output) @@ -366,10 +366,10 @@ class TestChecksumOffload(TestCase): return False def scapy_exec(self, cmd: str, timeout=1) -> str: - return self.tester.send_expect(cmd, ">>>", timeout=timeout) + return self.tg_node.send_expect(cmd, ">>>", timeout=timeout) - def get_packets(self, dut_mac, tester_mac): - eth = Ether(dst=dut_mac, src=tester_mac) + def get_packets(self, sut_mac, tg_mac): + eth = Ether(dst=sut_mac, src=tg_mac) packets = [] checksum_options = ( {}, @@ -420,36 +420,36 @@ class TestChecksumOffload(TestCase): return packets def send_tx_package( - self, packet_file_path, capture_file_path, packets, iface, dut_mac + self, packet_file_path, capture_file_path, packets, iface, sut_mac ): if os.path.isfile(capture_file_path): os.remove(capture_file_path) - self.tester.send_expect( - f"tcpdump -i '{iface}' ether src {dut_mac} -s 0 -w {capture_file_path} &", + self.tg_node.send_expect( + f"tcpdump -i '{iface}' ether src {sut_mac} -s 0 -w {capture_file_path} &", "# ", ) if os.path.isfile(packet_file_path): os.remove(packet_file_path) wrpcap(packet_file_path, packets) - self.tester.session.copy_file_to(packet_file_path, packet_file_path) + self.tg_node.session.copy_file_to(packet_file_path, packet_file_path) # send packet - self.tester.send_expect("scapy", ">>>") + self.tg_node.send_expect("scapy", ">>>") self.scapy_exec(f"packets = rdpcap('{packet_file_path}')") for i in range(0, len(packets)): self.scapy_exec(f"packets[{i}].show") self.scapy_exec(f"sendp(packets[{i}], iface='{iface}')") self.pmdout.get_output(timeout=0.5) - self.dut.send_expect( - "show port stats {}".format(self.dut_ports[0]), "testpmd>" + self.sut_node.send_expect( + "show port stats {}".format(self.sut_ports[0]), "testpmd>" ) - self.tester.send_expect("quit()", "# ") + self.tg_node.send_expect("quit()", "# ") time.sleep(1) - self.tester.send_expect("killall tcpdump", "#") + self.tg_node.send_expect("killall tcpdump", "#") time.sleep(1) - self.tester.send_expect('echo "Cleaning buffer"', 
"#") + self.tg_node.send_expect('echo "Cleaning buffer"', "#") time.sleep(1) return @@ -502,7 +502,7 @@ class TestChecksumOffload(TestCase): Verify that the same number of packet are correctly received on the traffic generator side. """ - mac = self.dut.get_mac_address(self.dut_ports[0]) + mac = self.sut_node.get_mac_address(self.sut_ports[0]) pktsChkErr = { "IP/UDP": 'Ether(dst="%s", src="52:00:00:00:00:00")/Dot1Q(vlan=1)/IP(chksum=0x0)/UDP(chksum=0xf)/("X"*46)' @@ -534,10 +534,10 @@ class TestChecksumOffload(TestCase): del pktsChkErr["IP/SCTP"] del pkts["IP/SCTP"] - self.checksum_enablehw(self.dut_ports[0]) - self.dut.send_expect("start", "testpmd>") + self.checksum_enablehw(self.sut_ports[0]) + self.sut_node.send_expect("start", "testpmd>") result = self.checksum_validate(pktsChkErr, pkts) - self.dut.send_expect("stop", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") self.verify(len(result) == 0, ",".join(list(result.values()))) def test_rx_checksum_valid_flags(self): @@ -546,7 +546,7 @@ class TestChecksumOffload(TestCase): transmit packet.Enable Checksum offload. Verify the checksum valid-flags. """ - mac = self.dut.get_mac_address(self.dut_ports[0]) + mac = self.sut_node.get_mac_address(self.sut_ports[0]) pkts_ref = { "IP/UDP": 'Ether(dst="%s", src="52:00:00:00:00:00")/IP()/UDP()/("X"*46)' @@ -561,7 +561,7 @@ class TestChecksumOffload(TestCase): % mac, } - self.checksum_enablehw(self.dut_ports[0]) + self.checksum_enablehw(self.sut_ports[0]) # get the packet checksum value result = self.get_chksum_values(pkts_ref) @@ -611,7 +611,7 @@ class TestChecksumOffload(TestCase): Verify that the same number of packet are correctly received on the traffic generator side. 
""" - mac = self.dut.get_mac_address(self.dut_ports[0]) + mac = self.sut_node.get_mac_address(self.sut_ports[0]) pkts = { "IP/UDP": 'Ether(dst="%s", src="52:00:00:00:00:00")/IP(chksum=0x0)/UDP(chksum=0xf)/("X"*46)' @@ -643,13 +643,13 @@ class TestChecksumOffload(TestCase): del pkts["IP/SCTP"] del pkts_ref["IP/SCTP"] - self.checksum_enablehw(self.dut_ports[0]) + self.checksum_enablehw(self.sut_ports[0]) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") result = self.checksum_validate(pkts, pkts_ref) - self.dut.send_expect("stop", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") self.verify(len(result) == 0, ",".join(list(result.values()))) @@ -660,7 +660,7 @@ class TestChecksumOffload(TestCase): Verify that the same number of packet are correctly received on the traffic generator side. """ - mac = self.dut.get_mac_address(self.dut_ports[0]) + mac = self.sut_node.get_mac_address(self.sut_ports[0]) sndIP = "10.0.0.1" sndIPv6 = "::1" sndPkts = { @@ -687,12 +687,12 @@ class TestChecksumOffload(TestCase): % (mac, expIPv6), } - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") result = self.checksum_validate(sndPkts, expPkts) self.verify(len(result) == 0, ",".join(list(result.values()))) - self.dut.send_expect("stop", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") def benchmark(self, lcore, ptype, mode, flow_format, size_list, nic): """ @@ -701,48 +701,48 @@ class TestChecksumOffload(TestCase): Bps = dict() Pps = dict() Pct = dict() - dmac = self.dut.get_mac_address(self.dut_ports[0]) - dmac1 = self.dut.get_mac_address(self.dut_ports[1]) + dmac = self.sut_node.get_mac_address(self.sut_ports[0]) + dmac1 = self.sut_node.get_mac_address(self.sut_ports[1]) result = [2, lcore, ptype, mode] for size in size_list: flow = flow_format % (dmac, size) pcap = os.sep.join([self.output_path, "test.pcap"]) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap, flow)) - 
self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap, flow)) + self.tg_node.scapy_execute() flow = flow_format % (dmac1, size) pcap = os.sep.join([self.output_path, "test1.pcap"]) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap, flow)) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap, flow)) + self.tg_node.scapy_execute() tgenInput = [] pcap = os.sep.join([self.output_path, "test.pcap"]) tgenInput.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), pcap, ) ) pcap = os.sep.join([self.output_path, "test1.pcap"]) tgenInput.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), pcap, ) ) # clear streams before add new streams - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() # create an instance to set stream field setting # Moved here because it messes with the ability of the functional tests to use scapy. - self.pktgen_helper = PacketGeneratorHelper() - # run packet generator + self.pktgen_helper = TrafficGeneratorStream() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) - Bps[str(size)], Pps[str(size)] = self.tester.pktgen.measure_throughput( + Bps[str(size)], Pps[str(size)] = self.tg_node.perf_tg.measure_throughput( stream_ids=streams ) self.verify(Pps[str(size)] > 0, "No traffic detected") @@ -759,8 +759,8 @@ class TestChecksumOffload(TestCase): Test checksum offload performance. 
""" # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") - self.dut.send_expect("quit", "#") + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") + self.sut_node.send_expect("quit", "#") # sizes = [64, 128, 256, 512, 1024] sizes = [64, 128] @@ -776,7 +776,7 @@ class TestChecksumOffload(TestCase): del pkts["IP/SCTP"] lcore = "1S/2C/1T" - portMask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) + portMask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) for mode in ["sw", "hw"]: self.logger.info("%s performance" % mode) tblheader = ["Ports", "S/C/T", "Packet Type", "Mode"] @@ -792,34 +792,34 @@ class TestChecksumOffload(TestCase): socket=self.ports_socket, ) - self.dut.send_expect("set fwd csum", "testpmd> ") + self.sut_node.send_expect("set fwd csum", "testpmd> ") if mode == "hw": - self.checksum_enablehw(self.dut_ports[0]) - self.checksum_enablehw(self.dut_ports[1]) + self.checksum_enablehw(self.sut_ports[0]) + self.checksum_enablehw(self.sut_ports[1]) else: - self.checksum_enablesw(self.dut_ports[0]) - self.checksum_enablesw(self.dut_ports[1]) + self.checksum_enablesw(self.sut_ports[0]) + self.checksum_enablesw(self.sut_ports[1]) - self.dut.send_expect("start", "testpmd> ", 3) + self.sut_node.send_expect("start", "testpmd> ", 3) for ptype in list(pkts.keys()): self.benchmark(lcore, ptype, mode, pkts[ptype], sizes, self.nic) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "#", 10) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "#", 10) self.result_table_print() def test_hardware_checksum_check_ip_rx(self): - self.tester.send_expect("scapy", ">>>") - self.checksum_enablehw(self.dut_ports[0]) - self.dut.send_expect("start", "testpmd>") - self.pmdout.wait_link_status_up(self.dut_ports[0]) + self.tg_node.send_expect("scapy", ">>>") + self.checksum_enablehw(self.sut_ports[0]) + 
self.sut_node.send_expect("start", "testpmd>") + self.pmdout.wait_link_status_up(self.sut_ports[0]) verification_errors: List[VerifyFailure] = [] - iface = self.tester.get_interface(self.tester.get_local_port(self.dut_ports[0])) - dut_mac = self.dut.get_mac_address(self.dut_ports[0]) - tester_mac = self.tester.get_mac(self.tester.get_local_port(self.dut_ports[0])) + iface = self.tg_node.get_interface(self.tg_node.get_local_port(self.sut_ports[0])) + sut_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + tg_mac = self.tg_node.get_mac(self.tg_node.get_local_port(self.sut_ports[0])) - self.scapy_exec(f"eth = Ether(dst='{dut_mac}', src='{tester_mac}')") + self.scapy_exec(f"eth = Ether(dst='{sut_mac}', src='{tg_mac}')") self.scapy_exec(f"iface = '{iface}'") # Untunnelled @@ -838,19 +838,19 @@ class TestChecksumOffload(TestCase): self.logger.error(str(err)) self.verify(len(verification_errors) == 0, "See previous output") - self.tester.send_expect("quit()", "# ") - self.dut.send_expect("stop", "testpmd>") + self.tg_node.send_expect("quit()", "# ") + self.sut_node.send_expect("stop", "testpmd>") def test_hardware_checksum_check_ip_tx(self): - self.checksum_enablehw(self.dut_ports[0]) - self.dut.send_expect("start", "testpmd>") - self.pmdout.wait_link_status_up(self.dut_ports[0]) + self.checksum_enablehw(self.sut_ports[0]) + self.sut_node.send_expect("start", "testpmd>") + self.pmdout.wait_link_status_up(self.sut_ports[0]) verification_errors: List[VerifyFailure] = [] - iface = self.tester.get_interface(self.tester.get_local_port(self.dut_ports[0])) - dut_mac = self.dut.get_mac_address(self.dut_ports[0]) - tester_mac = self.tester.get_mac(self.tester.get_local_port(self.dut_ports[0])) - eth = Ether(dst=dut_mac, src=tester_mac) + iface = self.tg_node.get_interface(self.tg_node.get_local_port(self.sut_ports[0])) + sut_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + tg_mac = self.tg_node.get_mac(self.tg_node.get_local_port(self.sut_ports[0])) + eth = 
Ether(dst=sut_mac, src=tg_mac) checksum_options = ( {}, @@ -865,13 +865,13 @@ class TestChecksumOffload(TestCase): capture_file_name = "test_hardware_checksum_check_l3_tx_capture.pcap" packet_file_path = "/tmp/test_hardware_checksum_check_l3_tx_packets.pcap" - capture_file_path = "/tmp/tester/" + capture_file_name + capture_file_path = "/tmp/tg/" + capture_file_name self.send_tx_package( - packet_file_path, capture_file_path, packets, iface, dut_mac + packet_file_path, capture_file_path, packets, iface, sut_mac ) - self.tester.session.copy_file_from( + self.tg_node.session.copy_file_from( capture_file_path, "output/tmp/pcap/" + capture_file_name ) captured_packets = rdpcap("output/tmp/pcap/" + capture_file_name) @@ -890,24 +890,24 @@ class TestChecksumOffload(TestCase): f" checksum when it should have had the opposite." ) - self.dut.send_expect("stop", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") if len(error_messages) != 0: for error_msg in error_messages: self.logger.error(error_msg) self.verify(False, "See prior output") def test_hardware_checksum_check_l4_rx(self): - self.checksum_enablehw(self.dut_ports[0]) - self.dut.send_expect("start", "testpmd>") + self.checksum_enablehw(self.sut_ports[0]) + self.sut_node.send_expect("start", "testpmd>") verification_errors: List[VerifyFailure] = [] - iface = self.tester.get_interface(self.tester.get_local_port(self.dut_ports[0])) - dut_mac = self.dut.get_mac_address(self.dut_ports[0]) - tester_mac = self.tester.get_mac(self.tester.get_local_port(self.dut_ports[0])) + iface = self.tg_node.get_interface(self.tg_node.get_local_port(self.sut_ports[0])) + sut_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + tg_mac = self.tg_node.get_mac(self.tg_node.get_local_port(self.sut_ports[0])) - self.tester.send_expect("scapy", ">>> ") - self.scapy_exec(f"eth = Ether(dst='{dut_mac}', src='{tester_mac}')") + self.tg_node.send_expect("scapy", ">>> ") + self.scapy_exec(f"eth = Ether(dst='{sut_mac}', src='{tg_mac}')") 
self.scapy_exec(f"iface = '{iface}'") # Untunneled for l3 in l3_protos: @@ -989,35 +989,35 @@ class TestChecksumOffload(TestCase): # if vf is not None: # verification_errors.append(vf) - self.tester.send_expect("quit()", "#") - self.dut.send_expect("stop", "testpmd>") + self.tg_node.send_expect("quit()", "#") + self.sut_node.send_expect("stop", "testpmd>") for err in verification_errors: self.logger.error(str(err)) self.verify(len(verification_errors) == 0, "See previous output") def test_hardware_checksum_check_l4_tx(self): - self.checksum_enablehw(self.dut_ports[0]) - self.dut.send_expect("start", "testpmd>") + self.checksum_enablehw(self.sut_ports[0]) + self.sut_node.send_expect("start", "testpmd>") verification_errors: List[VerifyFailure] = [] - iface = self.tester.get_interface(self.tester.get_local_port(self.dut_ports[0])) - dut_mac = self.dut.get_mac_address(self.dut_ports[0]) - tester_mac = self.tester.get_mac(self.tester.get_local_port(self.dut_ports[0])) + iface = self.tg_node.get_interface(self.tg_node.get_local_port(self.sut_ports[0])) + sut_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + tg_mac = self.tg_node.get_mac(self.tg_node.get_local_port(self.sut_ports[0])) - packets = self.get_packets(dut_mac, tester_mac) + packets = self.get_packets(sut_mac, tg_mac) capture_file_name = "test_hardware_checksum_check_l4_tx_capture.pcap" packet_file_path = "/tmp/test_hardware_checksum_check_l4_tx_packets.pcap" - capture_file_path = "/tmp/tester/" + capture_file_name + capture_file_path = "/tmp/tg/" + capture_file_name self.send_tx_package( - packet_file_path, capture_file_path, packets, iface, dut_mac + packet_file_path, capture_file_path, packets, iface, sut_mac ) - self.tester.session.copy_file_from( + self.tg_node.session.copy_file_from( capture_file_path, "output/tmp/pcap/" + capture_file_name ) @@ -1027,7 +1027,7 @@ class TestChecksumOffload(TestCase): len(packets) == len(captured_packets), "Not all packets were received" ) - 
self.dut.send_expect("stop", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") self.count = 0 error_messages = self.validate_packet_list_checksums(captured_packets) @@ -1041,7 +1041,7 @@ class TestChecksumOffload(TestCase): """ Run after each test case. """ - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") def tear_down_all(self): """ diff --git a/tests/TestSuite_cloud_filter_with_l4_port.py b/tests/TestSuite_cloud_filter_with_l4_port.py index 5f4ff0de..55ee6907 100644 --- a/tests/TestSuite_cloud_filter_with_l4_port.py +++ b/tests/TestSuite_cloud_filter_with_l4_port.py @@ -15,13 +15,13 @@ import time import scapy.layers.inet from scapy.utils import rdpcap -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils -from framework.crb import Crb -from framework.dut import Dut from framework.exception import VerifyFailure +from framework.node import Node from framework.pmd_output import PmdOutput from framework.settings import DRIVERS +from framework.sut_node import SutNode from framework.test_case import TestCase MAX_QUEUE = 16 @@ -34,19 +34,19 @@ class TestCloudFilterWithL4Port(TestCase): Generic filter Prerequistites """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.cores = "1S/8C/1T" self.pf_cores = "1S/8C/1T" - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.tester_itf = self.tester.get_interface(localPort) - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf_mac = self.dut.get_mac_address(0) - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pkt_obj = 
packet.Packet() + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_itf = self.tg_node.get_interface(localPort) + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf_mac = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() self.verify( self.nic @@ -66,24 +66,24 @@ class TestCloudFilterWithL4Port(TestCase): """ Run before each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=%d --txq=%d --disable-rss" % (MAX_QUEUE, MAX_QUEUE), "-a %s --file-prefix=test1" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set promisc all off", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set promisc all off", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) def destroy_env(self): """ This is to stop testpmd. """ - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) def compare_memory_rules(self, expectedRules): @@ -91,7 +91,7 @@ class TestCloudFilterWithL4Port(TestCase): dump all flow rules that have been created in memory and compare that total rules number with the given expected number to see if they are equal, as to get your conclusion after you have deleted any flow rule entry. 
""" - outstring = self.dut.send_expect("flow list 0", "testpmd> ", 20) + outstring = self.sut_node.send_expect("flow list 0", "testpmd> ", 20) result_scanner = r"\d*.*?\d*.*?\d*.*?=>*" scanner = re.compile(result_scanner, re.DOTALL) m = scanner.findall(outstring) @@ -108,10 +108,10 @@ class TestCloudFilterWithL4Port(TestCase): # check if there are expected flow rules have been created self.compare_memory_rules(rule_num) # check if one rule destroyed with success - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") self.compare_memory_rules(rule_num - 1) # check if all flow rules have been removed with success - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.compare_memory_rules(0) def sendpkt(self, pktstr, count=1): @@ -119,11 +119,11 @@ class TestCloudFilterWithL4Port(TestCase): py_version = sys.version if py_version.startswith("3."): - self.pkt_obj.pktgen.pkts.clear() + self.scapy_pkt_builder.scapy_pkt_util.pkts.clear() else: - del self.pkt_obj.pktgen.pkts[:] - self.pkt_obj.append_pkt(pktstr) - self.pkt_obj.send_pkt(self.tester, tx_port=self.tester_itf, count=count) + del self.scapy_pkt_builder.scapy_pkt_util.pkts[:] + self.scapy_pkt_builder.append_pkt(pktstr) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf, count=count) def sendpkt_check_result(self, src_port, dst_port, queue, match, pctype=""): match_info = "/queue %d: " % queue @@ -160,7 +160,7 @@ class TestCloudFilterWithL4Port(TestCase): pktstr="Ether()/IP()/UDP(sport=%s, dport=%s)" % (src_port, dst_port) ) - out_pf = self.dut.get_session_output(timeout=2) + out_pf = self.sut_node.get_session_output(timeout=2) print("out_pf is %s" % out_pf) if match == 1: @@ -182,14 +182,14 @@ class TestCloudFilterWithL4Port(TestCase): ): # validate - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / %s / %s %s is %d / end 
actions pf / queue index %d / end" % (ip_type, l4_port_type, src_dst, port_value, queue_id), "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / %s / %s %s is %d / end actions pf / queue index %d / end" % (ip_type, l4_port_type, src_dst, port_value, queue_id), "created", @@ -222,7 +222,7 @@ class TestCloudFilterWithL4Port(TestCase): ) # flush - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") if src_dst is "src": self.sendpkt_check_result( @@ -376,27 +376,27 @@ class TestCloudFilterWithL4Port(TestCase): ) def test_multi_rule(self): - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp src is 11 / end actions pf / queue index 1 / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp src is 22 / end actions pf / queue index 2 / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp src is 33 / end actions pf / queue index 3 / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp dst is 44 / end actions pf / queue index 4 / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp dst is 55 / end actions pf / queue index 5 / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp dst is 66 / end actions pf / queue index 6 / end", "created", ) @@ -409,14 +409,14 @@ class TestCloudFilterWithL4Port(TestCase): self.sendpkt_check_result("", "66", 6, 1, "ipv4-sctp") # destroy - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") self.sendpkt_check_result("11", "", 1, 0, "ipv4-udp") self.compare_memory_rules(5) # flush - 
self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.sendpkt_check_result("22", "", 2, 0, "ipv4-tcp") @@ -424,22 +424,22 @@ class TestCloudFilterWithL4Port(TestCase): def test_negative(self): # unsupported rules - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp src is 156 dst is 156 / end actions pf / queue index 1 / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp src is 156 / end actions pf / queue index 1 / end", "create", ) # conflicted rules - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp src is 156 / end actions pf / queue index 2 / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp src is 156 / end actions pf / queue index 2 / end", "error", ) @@ -449,10 +449,10 @@ class TestCloudFilterWithL4Port(TestCase): Run after each test case. """ self.destroy_env() - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_cmdline.py b/tests/TestSuite_cmdline.py index 54201910..9316d590 100644 --- a/tests/TestSuite_cmdline.py +++ b/tests/TestSuite_cmdline.py @@ -18,16 +18,16 @@ class TestCmdline(TestCase): Cmdline Prerequisites: cmdline build pass - At least one core in DUT + At least one core in SUT """ - out = self.dut.build_dpdk_apps("examples/cmdline") + out = self.sut_node.build_dpdk_apps("examples/cmdline") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") # Run cmdline app - self.app_cmdline_path = self.dut.apps_name["cmdline"] - self.eal_para = self.dut.create_eal_parameters(cores="1S/1C/1T") - self.dut.send_expect( + self.app_cmdline_path = self.sut_node.apps_name["cmdline"] + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/1C/1T") + self.sut_node.send_expect( r"./%s %s" % (self.app_cmdline_path, self.eal_para), "> ", 10 ) @@ -44,31 +44,31 @@ class TestCmdline(TestCase): """ # add a test object with an IP address associated - out = self.dut.send_expect("add object 192.168.0.1", "example> ") + out = self.sut_node.send_expect("add object 192.168.0.1", "example> ") self.verify("Object object added, ip=192.168.0.1" in out, "add command error") # verify the object existence - out = self.dut.send_expect("add object 192.168.0.1", "example> ") + out = self.sut_node.send_expect("add object 192.168.0.1", "example> ") self.verify("Object object already exist" in out, "double add command error") # show the object result by 'show' command - out = self.dut.send_expect("show object", "example> ") + out = self.sut_node.send_expect("show object", "example> ") self.verify("Object object, ip=192.168.0.1" in out, "show command error") # delete the object in cmdline - out = self.dut.send_expect("del object", "example> ") + out = self.sut_node.send_expect("del object", "example> ") self.verify("Object object removed, ip=192.168.0.1" in 
out, "del command error") # double delete the object to verify the correctness - out = self.dut.send_expect("del object", "example> ", 1) + out = self.sut_node.send_expect("del object", "example> ", 1) self.verify("Bad arguments" in out, "double del command error") # verify no such object anymore - out = self.dut.send_expect("show object", "example> ", 1) + out = self.sut_node.send_expect("show object", "example> ", 1) self.verify("Bad arguments" in out, "final show command error") # verify the help command - out = self.dut.send_expect("help", "example> ", 1) + out = self.sut_node.send_expect("help", "example> ", 1) """ Demo example of command line interface in RTE @@ -87,7 +87,7 @@ class TestCmdline(TestCase): """ self.verify(" " in out, "help command error") - out = self.dut.send_expect("?", "example> ", 1) + out = self.sut_node.send_expect("?", "example> ", 1) """ show [Mul-choice STRING]: Show/del an object del [Mul-choice STRING]: Show/del an object @@ -108,4 +108,4 @@ class TestCmdline(TestCase): Run after each test suite. Stop cmdline app. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_compressdev_isal_pmd.py b/tests/TestSuite_compressdev_isal_pmd.py index 60432371..f8d8e1fb 100644 --- a/tests/TestSuite_compressdev_isal_pmd.py +++ b/tests/TestSuite_compressdev_isal_pmd.py @@ -59,7 +59,7 @@ class TestCompressdevIsalPmd(TestCase): pass def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() if self._perf_result: with open(self.logger.log_path + "/" + self.suite_name + ".json", "a") as f: json.dump(self._perf_result, f, indent=4) diff --git a/tests/TestSuite_compressdev_qat_pmd.py b/tests/TestSuite_compressdev_qat_pmd.py index 9ff26c5f..6b4bcb1d 100644 --- a/tests/TestSuite_compressdev_qat_pmd.py +++ b/tests/TestSuite_compressdev_qat_pmd.py @@ -178,7 +178,7 @@ class TestCompressdevQatPmd(TestCase): pass def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() if self._perf_result: with open( self.logger.log_path + "/" + self.suite_name + ".json", "a" diff --git a/tests/TestSuite_compressdev_zlib_pmd.py b/tests/TestSuite_compressdev_zlib_pmd.py index 1945af0a..93fb7356 100644 --- a/tests/TestSuite_compressdev_zlib_pmd.py +++ b/tests/TestSuite_compressdev_zlib_pmd.py @@ -60,7 +60,7 @@ class TestCompressdevZlibPmd(TestCase): pass def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() if self._perf_result: with open( self.logger.log_path + "/" + self.suite_name + ".json", "a" diff --git a/tests/TestSuite_coremask.py b/tests/TestSuite_coremask.py index 54db5550..2d4516a9 100644 --- a/tests/TestSuite_coremask.py +++ b/tests/TestSuite_coremask.py @@ -36,10 +36,10 @@ class TestCoremask(TestCase): Coremask Prerequisites. 
""" - self.port_mask = utils.create_mask(self.dut.get_ports(self.nic)) - self.mem_channel = self.dut.get_memory_channels() - self.app_test_path = self.dut.apps_name["test"] - self.all_cores = self.dut.get_core_list("all") + self.port_mask = utils.create_mask(self.sut_node.get_ports(self.nic)) + self.mem_channel = self.sut_node.get_memory_channels() + self.app_test_path = self.sut_node.apps_name["test"] + self.all_cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -58,7 +58,7 @@ class TestCoremask(TestCase): command = command_line % (self.app_test_path, core_mask, self.mem_channel) - out = self.dut.send_expect(command, "RTE>>", 10) + out = self.sut_node.send_expect(command, "RTE>>", 10) self.verify( "EAL: Detected lcore %s as core" % core in out, "Core %s not detected" % core, @@ -68,7 +68,7 @@ class TestCoremask(TestCase): "EAL: Main lcore %s is ready" % core in out, "Core %s not ready" % core ) - self.dut.send_expect("quit", "# ", 10) + self.sut_node.send_expect("quit", "# ", 10) def test_all_cores_coremask(self): """ @@ -81,7 +81,7 @@ class TestCoremask(TestCase): command = command_line % (self.app_test_path, core_mask, self.mem_channel) - out = self.dut.send_expect(command, "RTE>>", 10) + out = self.sut_node.send_expect(command, "RTE>>", 10) self.verify( "EAL: Main lcore %s is ready" % first_core in out, "Core %s not ready" % first_core, @@ -102,7 +102,7 @@ class TestCoremask(TestCase): "Core %s not detected" % core, ) - self.dut.send_expect("quit", "# ", 10) + self.sut_node.send_expect("quit", "# ", 10) def test_big_coremask(self): """ @@ -116,7 +116,7 @@ class TestCoremask(TestCase): big_coremask += "f" command = command_line % (self.app_test_path, big_coremask, self.mem_channel) try: - out = self.dut.send_expect(command, "RTE>>", 10) + out = self.sut_node.send_expect(command, "RTE>>", 10) except: self.verify("EAL: invalid coremask" in out, "Small core mask set") @@ -129,7 +129,7 @@ class TestCoremask(TestCase): "Core %s not detected" % core, 
) - self.dut.send_expect("quit", "# ", 10) + self.sut_node.send_expect("quit", "# ", 10) def test_wrong_coremask(self): """ @@ -164,20 +164,20 @@ class TestCoremask(TestCase): command = command_line % (self.app_test_path, coremask, self.mem_channel) try: - out = self.dut.send_expect(command, "# ", 5) + out = self.sut_node.send_expect(command, "# ", 5) self.verify( "EAL: invalid coremask" in out, "Wrong core mask (%s) accepted" % coremask, ) except: - self.dut.send_expect("quit", "# ", 5) + self.sut_node.send_expect("quit", "# ", 5) raise VerifyFailure("Wrong core mask (%s) accepted" % coremask) def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_crypto_perf_cryptodev_perf.py b/tests/TestSuite_crypto_perf_cryptodev_perf.py index 9270b52d..2ea6d4f8 100644 --- a/tests/TestSuite_crypto_perf_cryptodev_perf.py +++ b/tests/TestSuite_crypto_perf_cryptodev_perf.py @@ -42,18 +42,18 @@ class PerfTestsCryptodev(TestCase): "digest-sz": None, "csv-friendly": None, } - self._app_path = self.dut.apps_name["test-crypto-perf"] - page_size = self.dut.send_expect( + self._app_path = self.sut_node.apps_name["test-crypto-perf"] + page_size = self.sut_node.send_expect( "awk '/Hugepagesize/ {print $2}' /proc/meminfo", "# " ) if int(page_size) == 1024 * 1024: - self.dut.send_expect( + self.sut_node.send_expect( "echo 0 > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages" % (page_size), "# ", 5, ) - self.dut.send_expect( + self.sut_node.send_expect( "echo 16 > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages" % (page_size), "# ", @@ -62,9 +62,9 @@ class PerfTestsCryptodev(TestCase): cc.bind_qat_device(self, "vfio-pci") src_files = ["dep/test_aes_cbc.data", "dep/test_aes_gcm.data"] - self.dut_file_dir = "/tmp" + self.sut_file_dir = "/tmp" for file in src_files: - self.dut.session.copy_file_to(file, self.dut_file_dir) + self.sut_node.session.copy_file_to(file, 
self.sut_file_dir) def tear_down_all(self): if self._perf_result: @@ -77,7 +77,7 @@ class PerfTestsCryptodev(TestCase): pass def tear_down(self): - self.dut.kill_all() + self.sut_node.kill_all() def test_verify_aesni_mb(self): self._run_crypto_func() @@ -180,7 +180,7 @@ class PerfTestsCryptodev(TestCase): if cc.is_test_skip(self): return - cores = ",".join(self.dut.get_core_list("1S/2C/1T")) + cores = ",".join(self.sut_node.get_core_list("1S/2C/1T")) config = {"l": cores} devices = self._get_crypto_device(1) if not devices: @@ -195,8 +195,8 @@ class PerfTestsCryptodev(TestCase): self._app_path, eal_opt_str, crypto_func_opt_str ) try: - self.dut.send_expect( - cmd_str + ">%s/%s.txt" % (self.dut_file_dir, self.running_case), + self.sut_node.send_expect( + cmd_str + ">%s/%s.txt" % (self.sut_file_dir, self.running_case), "#", 600, ) @@ -204,8 +204,8 @@ class PerfTestsCryptodev(TestCase): self.logger.error(ex) raise ex - out = self.dut.send_command( - "cat %s/%s.txt" % (self.dut_file_dir, self.running_case), 30 + out = self.sut_node.send_command( + "cat %s/%s.txt" % (self.sut_file_dir, self.running_case), 30 ) self.verify("Error" not in out, "Test function failed") @@ -228,7 +228,7 @@ class PerfTestsCryptodev(TestCase): self._app_path, eal_opt_str, crypto_perf_opt_str ) try: - out = self.dut.send_expect(cmd_str, "#", 600) + out = self.sut_node.send_expect(cmd_str, "#", 600) except Exception as ex: self.logger.error(ex) raise ex @@ -323,7 +323,7 @@ class PerfTestsCryptodev(TestCase): def _get_core_and_thread_num(self): cpu_info = {} - out = self.dut.send_expect("lscpu", "#") + out = self.sut_node.send_expect("lscpu", "#") for each_line in out.split("\n"): if each_line.find(":") == -1: continue @@ -395,19 +395,19 @@ class PerfTestsCryptodev(TestCase): framesizes = self.get_case_cfg()["buffer-sz"].split(",") running_case = self.running_case - dut = self.dut.crb["IP"] - dut_index = self._suite_result.internals.index(dut) - target_index = 
self._suite_result.internals[dut_index + 1].index(self.target) - suite_index = self._suite_result.internals[dut_index + 1][ + sut = self.sut_node.node["IP"] + sut_index = self._suite_result.internals.index(sut) + target_index = self._suite_result.internals[sut_index + 1].index(self.target) + suite_index = self._suite_result.internals[sut_index + 1][ target_index + 2 ].index(self.suite_name) - case_index = self._suite_result.internals[dut_index + 1][target_index + 2][ + case_index = self._suite_result.internals[sut_index + 1][target_index + 2][ suite_index + 1 ].index(running_case) - self._suite_result.internals[dut_index + 1][target_index + 2][ + self._suite_result.internals[sut_index + 1][target_index + 2][ suite_index + 1 ].pop(case_index + 1) - self._suite_result.internals[dut_index + 1][target_index + 2][ + self._suite_result.internals[sut_index + 1][target_index + 2][ suite_index + 1 ].pop(case_index) diff --git a/tests/TestSuite_dcf_lifecycle.py b/tests/TestSuite_dcf_lifecycle.py index 60f2ad60..381f378a 100644 --- a/tests/TestSuite_dcf_lifecycle.py +++ b/tests/TestSuite_dcf_lifecycle.py @@ -26,8 +26,8 @@ from pprint import pformat import framework.utils as utils from framework.exception import VerifyFailure -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase @@ -36,22 +36,22 @@ class TestDcfLifeCycle(TestCase): @property def target_dir(self): target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir def d_con(self, cmd): _cmd = [cmd, "# ", 15] if isinstance(cmd, str) else cmd - return self.dut.send_expect(*_cmd) + return self.sut_node.send_expect(*_cmd) def d_a_con(self, cmds): if isinstance(cmds, 
str): _cmd = [cmds, "# ", 15] - return self.dut.alt_session.send_expect(*_cmd) + return self.sut_node.alt_session.send_expect(*_cmd) else: - return [self.dut.alt_session.send_expect(_cmd, "# ", 10) for _cmd in cmds] + return [self.sut_node.alt_session.send_expect(_cmd, "# ", 10) for _cmd in cmds] def vf_pmd2_con(self, cmd): _cmd = [cmd, "# ", 15] if isinstance(cmd, str) else cmd @@ -66,11 +66,11 @@ class TestDcfLifeCycle(TestCase): } return layer - def get_mac_layer(self, dut_port_id=0, vf_id=0): + def get_mac_layer(self, sut_port_id=0, vf_id=0): dmac = ( - self.vf_ports_info[dut_port_id]["vfs_mac"][vf_id] + self.vf_ports_info[sut_port_id]["vfs_mac"][vf_id] if vf_id is not None - else self.dut.ports_info[dut_port_id]["mac"] + else self.sut_node.ports_info[sut_port_id]["mac"] ) layer = { "ether": { @@ -84,20 +84,20 @@ class TestDcfLifeCycle(TestCase): pktlen = 64 - headers_size return pktlen - def config_stream(self, dut_port_id=0, vf_id=None): + def config_stream(self, sut_port_id=0, vf_id=None): pkt_layers = {"raw": {"payload": ["58"] * self.get_pkt_len()}} pkt_layers.update(self.get_ip_layer()) - pkt_layers.update(self.get_mac_layer(dut_port_id, vf_id)) - pkt = Packet(pkt_type="IP_RAW") + pkt_layers.update(self.get_mac_layer(sut_port_id, vf_id)) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IP_RAW") for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - self.logger.info(pkt.pktgen.pkt.command()) - return pkt + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + self.logger.info(scapy_pkt_builder.scapy_pkt_util.pkt.command()) + return scapy_pkt_builder - def send_packet_by_scapy(self, pkt, dut_port_id=0, count=1): - tester_port_id = self.tester.get_local_port(dut_port_id) - tx_iface = self.tester.get_interface(tester_port_id) - pkt.send_pkt(crb=self.tester, tx_port=tx_iface, count=count) + def send_packet_by_scapy(self, scapy_pkt_builder, sut_port_id=0, count=1): + tg_port_id = self.tg_node.get_local_port(sut_port_id) + 
tx_iface = self.tg_node.get_interface(tg_port_id) + scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_iface, count=count) def init_adq(self): cmds = [ @@ -107,13 +107,13 @@ class TestDcfLifeCycle(TestCase): ] self.d_a_con(cmds) - def set_adq_on_pf(self, dut_port_id=0): + def set_adq_on_pf(self, sut_port_id=0): """ Set ADQ on PF """ msg = "Set ADQ on PF" self.logger.info(msg) - intf = self.dut.ports_info[dut_port_id]["port"].intf_name + intf = self.sut_node.ports_info[sut_port_id]["port"].intf_name cmds = [ f"ethtool -K {intf} hw-tc-offload on", f"tc qdisc add dev {intf} ingress", @@ -126,7 +126,7 @@ class TestDcfLifeCycle(TestCase): self.is_adq_set = True return output - def remove_adq_on_pf(self, dut_port_id=0): + def remove_adq_on_pf(self, sut_port_id=0): """ Remove ADQ on PF """ @@ -134,7 +134,7 @@ class TestDcfLifeCycle(TestCase): return msg = "Remove ADQ on PF" self.logger.info(msg) - intf = self.dut.ports_info[dut_port_id]["port"].intf_name + intf = self.sut_node.ports_info[sut_port_id]["port"].intf_name cmds = [ f"tc filter del dev {intf} parent ffff: pref 1 protocol ip", f"tc filter show dev {intf} parent ffff:", @@ -146,13 +146,13 @@ class TestDcfLifeCycle(TestCase): self.d_a_con(cmds) self.is_adq_set = False - def set_adq_mac_vlan(self, dut_port_id=0): + def set_adq_mac_vlan(self, sut_port_id=0): """ change the ADQ commands to MAC-VLAN """ msg = "change the ADQ commands to MAC-VLAN" self.logger.info(msg) - intf = self.dut.ports_info[dut_port_id]["port"].intf_name + intf = self.sut_node.ports_info[sut_port_id]["port"].intf_name cmds = [ f"ethtool -K {intf} l2-fwd-offload on", f"ip link add link macvlan0 link {intf} type macvlan", @@ -163,7 +163,7 @@ class TestDcfLifeCycle(TestCase): self.is_adq_set = True return output - def remove_adq_mac_vlan(self, dut_port_id=0): + def remove_adq_mac_vlan(self, sut_port_id=0): """ Remove MAC-VLAN commands """ @@ -171,7 +171,7 @@ class TestDcfLifeCycle(TestCase): return msg = "Remove MAC-VLAN commands" 
self.logger.info(msg) - intf = self.dut.ports_info[dut_port_id]["port"].intf_name + intf = self.sut_node.ports_info[sut_port_id]["port"].intf_name cmds = [ "ip link del macvlan0", f"ethtool -K {intf} l2-fwd-offload off", @@ -189,18 +189,18 @@ class TestDcfLifeCycle(TestCase): def vf_init(self): self.vf_ports_info = {} - self.dut.setup_modules(self.target, "vfio-pci", "") + self.sut_node.setup_modules(self.target, "vfio-pci", "") def vf_create(self): max_vfs = 4 - for index, port_id in enumerate(self.dut_ports): - port_obj = self.dut.ports_info[port_id]["port"] + for index, port_id in enumerate(self.sut_ports): + port_obj = self.sut_node.ports_info[port_id]["port"] pf_driver = port_obj.default_driver - self.dut.generate_sriov_vfs_by_port(port_id, max_vfs, driver=pf_driver) + self.sut_node.generate_sriov_vfs_by_port(port_id, max_vfs, driver=pf_driver) pf_pci = port_obj.pci - sriov_vfs_port = self.dut.ports_info[port_id].get("vfs_port") + sriov_vfs_port = self.sut_node.ports_info[port_id].get("vfs_port") if not sriov_vfs_port: - msg = f"failed to create vf on dut port {pf_pci}" + msg = f"failed to create vf on SUT port {pf_pci}" self.logger.error(msg) continue for port in sriov_vfs_port: @@ -223,8 +223,8 @@ class TestDcfLifeCycle(TestCase): if not self.vf_ports_info: return for port_id, _ in self.vf_ports_info.items(): - self.dut.destroy_sriov_vfs_by_port(port_id) - port_obj = self.dut.ports_info[port_id]["port"] + self.sut_node.destroy_sriov_vfs_by_port(port_id) + port_obj = self.sut_node.ports_info[port_id]["port"] port_obj.bind_driver(self.drivername) self.vf_ports_info = None @@ -249,17 +249,17 @@ class TestDcfLifeCycle(TestCase): } return allowlist - def vf_set_mac_addr(self, dut_port_id=0, vf_id=1): - intf = self.dut.ports_info[dut_port_id]["port"].intf_name + def vf_set_mac_addr(self, sut_port_id=0, vf_id=1): + intf = self.sut_node.ports_info[sut_port_id]["port"].intf_name cmd = f"ip link set {intf} vf 1 mac 00:01:02:03:04:05" self.d_a_con(cmd) 
self.vf_testpmd2_reset_port() - def vf_set_trust(self, dut_port_id=0, vf_id=0, flag="on"): + def vf_set_trust(self, sut_port_id=0, vf_id=0, flag="on"): """ Set a VF as trust """ - intf = self.dut.ports_info[dut_port_id]["port"].intf_name + intf = self.sut_node.ports_info[sut_port_id]["port"].intf_name cmd = f"ip link set {intf} vf {vf_id} trust {flag}" self.d_a_con(cmd) @@ -269,7 +269,7 @@ class TestDcfLifeCycle(TestCase): """ self.vf_set_trust(flag="off") - def testpmd_set_flow_rule(self, dut_port_id=0, con_name="vf_dcf"): + def testpmd_set_flow_rule(self, sut_port_id=0, con_name="vf_dcf"): """ Set switch rule to VF from DCF """ @@ -281,7 +281,7 @@ class TestDcfLifeCycle(TestCase): "actions vf id {vf_id} / end" ).format( **{ - "port": dut_port_id, + "port": sut_port_id, "vf_id": 1, "ip_src": self.get_ip_layer()["ipv4"]["src"], "ip_dst": self.get_ip_layer()["ipv4"]["dst"], @@ -293,7 +293,7 @@ class TestDcfLifeCycle(TestCase): return output def init_vf_dcf_testpmd(self): - self.vf_dcf_testpmd = self.dut.apps_name["test-pmd"] + self.vf_dcf_testpmd = self.sut_node.apps_name["test-pmd"] def start_vf_dcf_testpmd(self, pmd_opiton): allowlist_name, prefix = pmd_opiton @@ -311,7 +311,7 @@ class TestDcfLifeCycle(TestCase): **{ "bin": "".join(["./", self.vf_dcf_testpmd]), "core_mask": core_mask, - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "allowlist": allowlist, "prefix": prefix, } @@ -346,8 +346,8 @@ class TestDcfLifeCycle(TestCase): cmd = "ps aux | grep testpmd" self.d_a_con(cmd) - def vf_dcf_testpmd_set_flow_rule(self, dut_port_id=0): - return self.testpmd_set_flow_rule(dut_port_id) + def vf_dcf_testpmd_set_flow_rule(self, sut_port_id=0): + return self.testpmd_set_flow_rule(sut_port_id) def get_vf_dcf_testpmd_start_output(self): output = self.vf_dcf_pmd_start_output @@ -356,9 +356,9 @@ class TestDcfLifeCycle(TestCase): return output def init_vf_testpmd2(self): - self.vf_testpmd2 = self.dut.apps_name["test-pmd"] + 
self.vf_testpmd2 = self.sut_node.apps_name["test-pmd"] self.vf_pmd2_session_name = "vf_testpmd2" - self.vf_pmd2_session = self.dut.new_session(self.vf_pmd2_session_name) + self.vf_pmd2_session = self.sut_node.new_session(self.vf_pmd2_session_name) def start_vf_testpmd2(self, pmd_opiton): allowlist_name, prefix = pmd_opiton @@ -376,7 +376,7 @@ class TestDcfLifeCycle(TestCase): **{ "bin": "".join(["./", self.vf_testpmd2]), "core_mask": core_mask, - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "allowlist": allowlist, "prefix": prefix, } @@ -410,8 +410,8 @@ class TestDcfLifeCycle(TestCase): ] [self.vf_pmd2_con([cmd, "testpmd> ", 15]) for cmd in cmds] - def vf_testpmd2_set_flow_rule(self, dut_port_id=0): - self.testpmd_set_flow_rule(dut_port_id, con_name="vf2") + def vf_testpmd2_set_flow_rule(self, sut_port_id=0): + self.testpmd_set_flow_rule(sut_port_id, con_name="vf2") def vf_pmd2_clear_port_stats(self): cmd = "clear port stats all" @@ -428,7 +428,7 @@ class TestDcfLifeCycle(TestCase): def check_vf_pmd2_stats( self, traffic, verbose_parser, portid=0, is_traffic_valid=True ): - pmd = PmdOutput(self.dut, session=self.vf_pmd2_session) + pmd = PmdOutput(self.sut_node, session=self.vf_pmd2_session) info = pmd.get_pmd_stats(portid) or {} ori_pkt = info.get("RX-packets") or 0 traffic() @@ -455,10 +455,10 @@ class TestDcfLifeCycle(TestCase): return output def check_vf_pmd2_traffic(self, func_name, topo=None, flag=False, **kwargs): - dut_port_id, vf_id = topo if topo else [0, 1] - pkt = self.config_stream(dut_port_id, vf_id) - traffic = partial(self.send_packet_by_scapy, pkt, dut_port_id, vf_id) - verbose_parser = partial(self.parse_pmd2_verbose_pkt_count, dut_port_id, vf_id) + sut_port_id, vf_id = topo if topo else [0, 1] + scapy_pkt_builder = self.config_stream(sut_port_id, vf_id) + traffic = partial(self.send_packet_by_scapy, scapy_pkt_builder, sut_port_id, vf_id) + verbose_parser = 
partial(self.parse_pmd2_verbose_pkt_count, sut_port_id, vf_id) self.vf_pmd2_clear_port_stats() self.check_vf_pmd2_stats(traffic, verbose_parser) status_change_func = getattr(self, func_name) @@ -466,10 +466,10 @@ class TestDcfLifeCycle(TestCase): self.check_vf_pmd2_stats(traffic, verbose_parser, is_traffic_valid=flag) def check_vf_traffic(self, func_name, topo=None, flag=False, **kwargs): - dut_port_id, vf_id = topo if topo else [0, 1] - pkt = self.config_stream(dut_port_id, vf_id) - traffic = partial(self.send_packet_by_scapy, pkt, dut_port_id, vf_id) - verbose_parser = partial(self.parse_pmd2_verbose_pkt_count, dut_port_id, vf_id) + sut_port_id, vf_id = topo if topo else [0, 1] + scapy_pkt_builder = self.config_stream(sut_port_id, vf_id) + traffic = partial(self.send_packet_by_scapy, scapy_pkt_builder, sut_port_id, vf_id) + verbose_parser = partial(self.parse_pmd2_verbose_pkt_count, sut_port_id, vf_id) self.vf_pmd2_clear_port_stats() self.check_vf_pmd2_stats(traffic, verbose_parser) status_change_func = getattr(self, func_name) @@ -482,13 +482,13 @@ class TestDcfLifeCycle(TestCase): status_change_func(**kwargs) def send_pkt_to_vf1(self): - tester_port_id = self.tester.get_local_port(0) - tester_itf = self.tester.get_interface(tester_port_id) - p = Packet() - p.append_pkt( + tg_port_id = self.tg_node.get_local_port(0) + tg_itf = self.tg_node.get_interface(tg_port_id) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.append_pkt( 'Ether(src="00:11:22:33:44:55", dst="C6:44:32:0A:EC:E1")/IP(src="192.168.0.2", dst="192.168.0.3")/("X"*64)' ) - p.send_pkt(self.tester, tx_port=tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tg_itf) time.sleep(1) def run_test_pre(self, pmd_opitons): @@ -563,7 +563,7 @@ class TestDcfLifeCycle(TestCase): except_content = None try: self.vf_set_trust() - self.vf_set_trust(dut_port_id=1) + self.vf_set_trust(sut_port_id=1) pmd_opts = [["pf1_vf0_dcf", "dcf1"], ["pf2_vf0_dcf", "dcf2"]] self.run_test_pre(pmd_opts) 
self.check_support_dcf_mode_02_result() @@ -969,7 +969,7 @@ class TestDcfLifeCycle(TestCase): self.run_test_post() self.check_dcf_with_l2fwd_adp_failed_result(output) # Exit DCF mode, PF can set L2 forwarding. - self.dut.destroy_sriov_vfs_by_port(0) + self.sut_node.destroy_sriov_vfs_by_port(0) time.sleep(1) output = self.set_adq_mac_vlan() self.check_dcf_with_l2fwd_adp_result(output) @@ -994,7 +994,7 @@ class TestDcfLifeCycle(TestCase): pmd_opts = [["pf1_vf0_dcf", "dcf"]] self.run_test_pre(pmd_opts) # run PF1 DCF mode, PF2 can set L2 forwarding. - self.dut.destroy_sriov_vfs_by_port(1) + self.sut_node.destroy_sriov_vfs_by_port(1) time.sleep(1) output = self.set_adq_mac_vlan(1) self.remove_adq_mac_vlan(1) @@ -1017,8 +1017,8 @@ class TestDcfLifeCycle(TestCase): supported_drivers = ["ice"] result = all( [ - self.dut.ports_info[index]["port"].default_driver in supported_drivers - for index in self.dut_ports + self.sut_node.ports_info[index]["port"].default_driver in supported_drivers + for index in self.sut_ports ] ) msg = "current nic <{0}> is not supported".format(self.nic) @@ -1026,10 +1026,10 @@ class TestDcfLifeCycle(TestCase): def preset_pmd_res(self): self.dcf_dev_id = "8086:1889" - self.socket = self.dut.get_numa_id(self.dut_ports[0]) - self.dut.init_reserved_core() - self.core_pf = self.dut.get_reserved_core("2C", 0) - self.core_vf = self.dut.get_reserved_core("2C", 0) + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.sut_node.init_reserved_core() + self.core_pf = self.sut_node.get_reserved_core("2C", 0) + self.core_vf = self.sut_node.get_reserved_core("2C", 0) def clear_flags(self): self.is_vf_dcf_pmd_on = self.is_vf_pmd2_on = False @@ -1061,7 +1061,7 @@ class TestDcfLifeCycle(TestCase): msg = "close vf devices" self.logger.info(msg) if self.vf_pmd2_session: - self.dut.close_session(self.vf_pmd2_session) + self.sut_node.close_session(self.vf_pmd2_session) self.vf_pmd2_session = None # @@ -1073,8 +1073,8 @@ class TestDcfLifeCycle(TestCase): 
Run at the start of each test suite. """ self.init_suite() - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Not enough ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Not enough ports") self.verify_supported_nic() # prepare testing environment self.preset_test_environment() @@ -1102,7 +1102,7 @@ class TestDcfLifeCycle(TestCase): self.destroy_resource() self.init_suite() self.preset_test_environment() - self.dut.kill_all() + self.sut_node.kill_all() self.clear_flags() def test_support_dcf_mode_01(self): @@ -1117,7 +1117,7 @@ class TestDcfLifeCycle(TestCase): """ DCF on 2 PFs, 1 trust VF on each PF """ - self.verify(len(self.dut_ports) >= 2, "2 ports at least") + self.verify(len(self.sut_ports) >= 2, "2 ports at least") msg = "begin : DCF on 2 PFs, 1 trust VF on each PF" self.logger.info(msg) self.verify_support_dcf_mode_02() @@ -1198,7 +1198,7 @@ class TestDcfLifeCycle(TestCase): """ DCF and ADQ can be enabled on different PF """ - self.verify(len(self.dut_ports) >= 2, "2 ports at least") + self.verify(len(self.sut_ports) >= 2, "2 ports at least") msg = "begin : DCF and ADQ can be enabled on different PF" self.logger.info(msg) self.verify_dcf_with_adq_03() @@ -1223,7 +1223,7 @@ class TestDcfLifeCycle(TestCase): """ DCF and L2 forwarding can be enabled on different PF """ - self.verify(len(self.dut_ports) >= 2, "2 ports at least") + self.verify(len(self.sut_ports) >= 2, "2 ports at least") msg = "begin : DCF and L2 forwarding can be enabled on different PF" self.logger.info(msg) self.verify_dcf_with_l2fwd_03() @@ -1260,14 +1260,14 @@ class TestDcfLifeCycle(TestCase): self.verify(matched, "flow rule on port 0 is not existed") def send_pkt_to_vf1_first(self, dmac): - tester_port_id = self.tester.get_local_port(0) - tester_itf = self.tester.get_interface(tester_port_id) - p = Packet() - p.append_pkt( + tg_port_id = self.tg_node.get_local_port(0) + tg_itf = 
self.tg_node.get_interface(tg_port_id) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.append_pkt( 'Ether(src="00:11:22:33:44:55", dst="%s")/IP()/TCP(sport=8012)/Raw(load="X"*30)' % dmac ) - p.send_pkt(self.tester, tx_port=tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tg_itf) time.sleep(1) def pretest_handle_acl_filter(self): @@ -1304,7 +1304,7 @@ class TestDcfLifeCycle(TestCase): def create_acl_rule_by_kernel_cmd(self, port_id=0, stats=True): # create an ACL rule on PF0 by kernel command - intf = self.dut.ports_info[port_id]["port"].intf_name + intf = self.sut_node.ports_info[port_id]["port"].intf_name rule = ( "ethtool -N %s flow-type tcp4 src-ip 192.168.10.0 m 0.255.255.255 dst-port 8000 m 0x00ff action -1" % intf @@ -1336,7 +1336,7 @@ class TestDcfLifeCycle(TestCase): **{ "bin": "".join(["./", self.vf_dcf_testpmd]), "core_mask": core_mask, - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "allowlist": allowlist, } ) @@ -1349,7 +1349,7 @@ class TestDcfLifeCycle(TestCase): def delete_acl_rule_by_kernel_cmd(self, port_id=0): # delete the kernel ACL rule - intf = self.dut.ports_info[port_id]["port"].intf_name + intf = self.sut_node.ports_info[port_id]["port"].intf_name self.d_a_con("ethtool -N %s delete %s" % (intf, self.rule_id)) def test_handle_acl_filter_01(self): @@ -1424,7 +1424,7 @@ class TestDcfLifeCycle(TestCase): **{ "bin": "".join(["./", self.vf_dcf_testpmd]), "core_mask": core_mask, - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "allowlist": allowlist, "prefix": "vf0", } @@ -1478,7 +1478,7 @@ class TestDcfLifeCycle(TestCase): ) # Reset VF1 by setting mac addr - intf = self.dut.ports_info[0]["port"].intf_name + intf = self.sut_node.ports_info[0]["port"].intf_name self.d_a_con("ip link set %s vf 1 mac 00:01:02:03:04:05" % intf) [self.vf_pmd2_con([cmd, "testpmd> ", 15]) for cmd in cmds] self.clear_vf_pmd2_port0_stats() 
@@ -1625,14 +1625,14 @@ class TestDcfLifeCycle(TestCase): ] [self.d_con([cmd, "testpmd> ", 15]) for cmd in cmds] - def vf_dcf_reset_mtu(self, dut_port_id=0, vf_id=0): - intf = self.dut.ports_info[dut_port_id]["port"].intf_name + def vf_dcf_reset_mtu(self, sut_port_id=0, vf_id=0): + intf = self.sut_node.ports_info[sut_port_id]["port"].intf_name cmd = f"ifconfig {intf} mtu 3000" self.d_a_con(cmd) self.vf_dcf_testpmd_reset_port() - def vf_dcf_set_mac_addr(self, dut_port_id=0, vf_id=0): - intf = self.dut.ports_info[dut_port_id]["port"].intf_name + def vf_dcf_set_mac_addr(self, sut_port_id=0, vf_id=0): + intf = self.sut_node.ports_info[sut_port_id]["port"].intf_name cmd = f"ip link set {intf} vf 0 mac 00:01:02:03:04:05" self.d_a_con(cmd) self.vf_dcf_testpmd_reset_port() diff --git a/tests/TestSuite_ddp_gtp.py b/tests/TestSuite_ddp_gtp.py index d0c3b560..d030f012 100644 --- a/tests/TestSuite_ddp_gtp.py +++ b/tests/TestSuite_ddp_gtp.py @@ -24,13 +24,13 @@ class TestDdpGtp(TestCase): self.verify( self.is_eth_series_nic(700), "ddp gtp can not support %s nic" % self.nic ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.vm0 = None self.env_done = False profile_file = "dep/gtp.pkgo" profile_dst = "/tmp/" - self.dut.session.copy_file_to(profile_file, profile_dst) + self.sut_node.session.copy_file_to(profile_file, profile_dst) self.PF_Q_strip = "RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF" # commit ee653bd8, queue number of per vf default value is defined # in drivers/net/i40e/i40e_ethdev.c, named as RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF @@ -46,32 +46,32 @@ class TestDdpGtp(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") def insmod_modprobe(self, modename=""): """ Insmod 
modProbe before run test case """ if modename == "igb_uio": - self.dut.send_expect("modprobe uio", "#", 10) - out = self.dut.send_expect("lsmod | grep igb_uio", "#") + self.sut_node.send_expect("modprobe uio", "#", 10) + out = self.sut_node.send_expect("lsmod | grep igb_uio", "#") if "igb_uio" in out: - self.dut.send_expect("rmmod -f igb_uio", "#", 10) - self.dut.send_expect( + self.sut_node.send_expect("rmmod -f igb_uio", "#", 10) + self.sut_node.send_expect( "insmod ./" + self.target + "/kmod/igb_uio.ko", "#", 10 ) - out = self.dut.send_expect("lsmod | grep igb_uio", "#") + out = self.sut_node.send_expect("lsmod | grep igb_uio", "#") assert "igb_uio" in out, "Failed to insmod igb_uio" def set_up(self): - self.dut_testpmd = PmdOutput(self.dut) - self.used_dut_port = self.dut_ports[0] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) + self.sut_testpmd = PmdOutput(self.sut_node) + self.used_sut_port = self.sut_ports[0] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) if "vf" in self._suite_result.test_case: self.insmod_modprobe("igb_uio") - self.bind_nic_driver(self.dut_ports, "igb_uio") + self.bind_nic_driver(self.sut_ports, "igb_uio") self.setup_vm_env() self.load_profile() self.vm0_testpmd.start_testpmd( @@ -90,10 +90,10 @@ class TestDdpGtp(TestCase): Search max queue number from configuration. 
""" if Q_strip is self.PF_Q_strip: - out = self.dut.send_expect("cat config/rte_config.h", "]# ", 10) + out = self.sut_node.send_expect("cat config/rte_config.h", "]# ", 10) pattern = "define (%s) (\d*)" % Q_strip else: - out = self.dut.send_expect("cat drivers/net/i40e/i40e_ethdev.c", "]# ", 10) + out = self.sut_node.send_expect("cat drivers/net/i40e/i40e_ethdev.c", "]# ", 10) pattern = "#define %s\s*(\d*)" % Q_strip s = re.compile(pattern) res = s.search(out) @@ -112,40 +112,40 @@ class TestDdpGtp(TestCase): Create testing environment with VF generated from 1PF """ if self.env_done is False: - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) time.sleep(1) vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} # set up VM0 ENV - self.vm0 = QEMUKvm(self.dut, "vm0", "ddp_gtp") + self.vm0 = QEMUKvm(self.sut_node, "vm0", "ddp_gtp") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) try: - self.vm0_dut = self.vm0.start() - if self.vm0_dut is None: + self.vm0_sut = self.vm0.start() + if self.vm0_sut is None: raise Exception("Set up VM0 ENV failed!") except Exception as e: self.destroy_vm_env() raise Exception(e) - self.vm0_dut_ports = self.vm0_dut.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm0_dut) + self.vm0_sut_ports = self.vm0_sut.get_ports("any") + self.vm0_testpmd = PmdOutput(self.vm0_sut) self.env_done = True def destroy_vm_env(self): if getattr(self, "vm0", None): - self.vm0_dut.kill_all() + self.vm0_sut.kill_all() self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() self.vm0 = None - if getattr(self, "used_dut_port", None): - 
self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] - self.used_dut_port = None + if getattr(self, "used_sut_port", None): + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] + self.used_sut_port = None self.env_done = False @@ -155,24 +155,24 @@ class TestDdpGtp(TestCase): profile will be stored in binary file and need to be passed to AQ to program Intel® Ethernet 700 Series during initialization stage. """ - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "Default", "--pkt-filter-mode=perfect --port-topology=chained \ --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE), ) - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - out = self.dut_testpmd.execute_cmd("ddp get list 0") - self.dut_testpmd.execute_cmd("ddp add 0 /tmp/gtp.pkgo,/tmp/gtp.bak") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + out = self.sut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp add 0 /tmp/gtp.pkgo,/tmp/gtp.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify("Profile number is: 1" in out, "Failed to load ddp profile!!!") - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port start all") time.sleep(1) - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") time.sleep(2) def gtp_packets( @@ -316,7 +316,7 @@ class TestDdpGtp(TestCase): def gtp_test(self, type="fdir", port="pf", tunnel_pkt="gtpu", inner_L3="ipv4"): """ - Send GTP packet to dut, receive packet from configured queue. + Send GTP packet to SUT, receive packet from configured queue. 
Input: filter type, port type, packet type, inner L3 type """ queue = random.randint(1, self.PF_QUEUE - 1) @@ -327,19 +327,19 @@ class TestDdpGtp(TestCase): wrong_teid = hex((random_teid + 2) % int(0xFFFFFFFF)) if type is "fdir": if inner_L3 is None: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / udp / \ %s teid is %s / end actions queue index %d / end" % (tunnel_pkt, correct_teid, queue) ) else: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / udp / \ %s teid is %s / %s / end actions queue index %d / end" % (tunnel_pkt, correct_teid, inner_L3, queue) ) if type is "clfter": - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / udp / \ %s teid is %s / end actions %s / queue index %d / end" % (tunnel_pkt, correct_teid, port, queue) @@ -356,15 +356,15 @@ class TestDdpGtp(TestCase): ) for packet_type in list(pkts.keys()): count = count + 1 - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([%s], iface="%s")' - % (pkts[packet_type], self.tester_intf) + % (pkts[packet_type], self.tg_intf) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() if port is "pf": - out = self.dut.get_session_output(timeout=5) + out = self.sut_node.get_session_output(timeout=5) else: - out = self.vm0_dut.get_session_output(timeout=5) + out = self.vm0_sut.get_session_output(timeout=5) self.verify( count == out.count("port 0/queue %d" % queue), "Failed to receive packet in this queue!!!", @@ -464,18 +464,18 @@ class TestDdpGtp(TestCase): def tear_down(self): if "vf" in self._suite_result.test_case: self.destroy_vm_env() - self.dut_testpmd.execute_cmd("stop") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("stop") + out = self.sut_testpmd.execute_cmd("ddp get list 0") if "Profile number is: 0" not in out: - self.dut_testpmd.execute_cmd("port stop all") + 
self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd("ddp del 0 /tmp/gtp.bak") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp del 0 /tmp/gtp.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify( "Profile number is: 0" in out, "Failed to delete ddp profile!!!" ) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.quit() + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.quit() def tear_down_all(self): if self.env_done: diff --git a/tests/TestSuite_ddp_gtp_qregion.py b/tests/TestSuite_ddp_gtp_qregion.py index a3dea1b2..d6367e8c 100644 --- a/tests/TestSuite_ddp_gtp_qregion.py +++ b/tests/TestSuite_ddp_gtp_qregion.py @@ -8,7 +8,7 @@ import time from scapy.all import * -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils from framework.pmd_output import PmdOutput from framework.settings import get_nic_name @@ -17,19 +17,19 @@ from framework.test_case import TestCase class TestDdpGtpQregion(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") profile_file = "dep/gtp.pkgo" profile_dst = "/tmp/" - self.dut.session.copy_file_to(profile_file, profile_dst) - out = self.dut.send_expect("cat config/rte_config.h", "]# ", 10) + self.sut_node.session.copy_file_to(profile_file, profile_dst) + out = self.sut_node.send_expect("cat config/rte_config.h", "]# ", 10) self.PF_Q_strip = "RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF" pattern = "define (%s) (\d*)" % self.PF_Q_strip self.PF_QUEUE = self.element_strip(out, pattern) - self.used_dut_port = self.dut_ports[0] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) - 
self.dut_testpmd = PmdOutput(self.dut) + self.used_sut_port = self.sut_ports[0] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) + self.sut_testpmd = PmdOutput(self.sut_node) def set_up(self): self.load_profile() @@ -54,18 +54,18 @@ class TestDdpGtpQregion(TestCase): profile will be stored in binary file and need to be passed to AQ to program Intel® Ethernet 700 Series during initialization stage. """ - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "Default", "--pkt-filter-mode=perfect --port-topology=chained \ --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE), ) - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd("ddp add 0 /tmp/gtp.pkgo,/tmp/gtp.bak") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp add 0 /tmp/gtp.pkgo,/tmp/gtp.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify("Profile number is: 1" in out, "Failed to load ddp profile!!!") - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port start all") def flowtype_qregion_mapping(self): """ @@ -77,18 +77,18 @@ class TestDdpGtpQregion(TestCase): q_nums = [8, 16, 8, 16] flowtypes = [26, 23, 24, 25] for rg_id, idx_id, q_num in zip(rg_ids, idx_ids, q_nums): - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "set port 0 queue-region region_id \ %d queue_start_index %d queue_num %d" % (rg_id, idx_id, q_num) ) for rg_id, flowtype in zip(rg_ids, flowtypes): - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "set port 0 queue-region region_id \ %d flowtype %d" % (rg_id, flowtype) ) - self.dut_testpmd.execute_cmd("set port 0 queue-region flush on") + self.sut_testpmd.execute_cmd("set port 0 queue-region flush on") def gtp_pkts(self, flowtype, keyword, opt): """ @@ -152,9 +152,9 @@ class TestDdpGtpQregion(TestCase): if flowtype 
== 26: a = 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header(teid=0xfe)/IP(dst="1.1.1.1", src="2.2.2.2")/UDP(dport=100, sport=200)/Raw("X"*20)' rawfile_src = "/tmp/test_gtp.raw" - packet.write_raw_pkt(a, rawfile_src) + scapy_pkt_builder.write_raw_pkt(a, rawfile_src) rawfile_dst = "/tmp/" - self.dut.session.copy_file_to(rawfile_src, rawfile_dst) + self.sut_node.session.copy_file_to(rawfile_src, rawfile_dst) def send_verify_fd(self, flowtype, keyword, opt): """ @@ -162,9 +162,9 @@ class TestDdpGtpQregion(TestCase): """ pkts = self.gtp_pkts(flowtype, keyword, opt) for packet_type in list(pkts.keys()): - pkt = packet.Packet(pkts[packet_type]) - pkt.send_pkt(crb=self.tester, tx_port=self.tester_intf) - out = self.dut.get_session_output(timeout=2) + scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder(pkts[packet_type]) + scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=self.tg_intf) + out = self.sut_node.get_session_output(timeout=2) pattern = "port (\d)/queue (\d{1,2}): received (\d) packets" qnum = self.element_strip(out, pattern) ptypes = packet_type.split("/") @@ -218,9 +218,9 @@ class TestDdpGtpQregion(TestCase): keyword = "src_ipv6" pkts = self.gtp_pkts(flowtype, keyword, opt) for packet_type in list(pkts.keys()): - pkt = packet.Packet(pkts[packet_type]) - pkt.send_pkt(crb=self.tester, tx_port=self.tester_intf) - out = self.dut.get_session_output(timeout=2) + scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder(pkts[packet_type]) + scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=self.tg_intf) + out = self.sut_node.get_session_output(timeout=2) self.verify("RTE_MBUF_F_RX_RSS_HASH" in out, "Failed to test RSS!!!") pattern = "port (\d)/queue (\d{1,2}): received (\d) packets" qnum = self.element_strip(out, pattern) @@ -269,8 +269,8 @@ class TestDdpGtpQregion(TestCase): ) def flowtype_pctype_mapping(self, flowtype, pctype): - self.dut_testpmd.execute_cmd("port config 0 pctype mapping reset") - out = self.dut_testpmd.execute_cmd("show port 0 pctype 
mapping") + self.sut_testpmd.execute_cmd("port config 0 pctype mapping reset") + out = self.sut_testpmd.execute_cmd("show port 0 pctype mapping") self.verify( "pctype: 63 -> flowtype: 14" in out, "Failed show flow type to pctype mapping!!!", @@ -279,10 +279,10 @@ class TestDdpGtpQregion(TestCase): "pctype: %s -> flowtype: %s" % (pctype, flowtype) not in out, "Failed show flow type to pctype mapping!!!", ) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype mapping update %s %s" % (pctype, flowtype) ) - out = self.dut_testpmd.execute_cmd("show port 0 pctype mapping") + out = self.sut_testpmd.execute_cmd("show port 0 pctype mapping") self.verify( "pctype: %s -> flowtype: %s" % (pctype, flowtype) in out, "Failed update flow type to pctype mapping!!!", @@ -306,25 +306,25 @@ class TestDdpGtpQregion(TestCase): self.flowtype_pctype_mapping(flowtype, pctype) if crlwords is not None: - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype %s fdir_inset clear all" % pctype ) for word in crlwords: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype %s fdir_inset set field %s" % (pctype, word) ) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") - self.dut_testpmd.wait_link_status_up(self.dut_ports[0]) + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") + self.sut_testpmd.wait_link_status_up(self.sut_ports[0]) qnum = self.send_verify_fd(flowtype, keywords, "word_opt") self.verify(qnum == 0, "Receive packet from wrong queue!!!") self.raw_packet_generate(flowtype) queue = random.randint(1, self.PF_QUEUE - 1) - 
self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow_director_filter 0 mode raw add flow %d fwd queue %d \ fd_id 1 packet /tmp/test_gtp.raw" % (flowtype, queue) @@ -375,21 +375,21 @@ class TestDdpGtpQregion(TestCase): """ self.flowtype_qregion_mapping() self.flowtype_pctype_mapping(flowtype, pctype) - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype %s hash_inset clear all" % pctype ) for word in crlwords: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype %s hash_inset set field %s" % (pctype, word) ) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.execute_cmd("port config all rss %s" % flowtype) - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") - self.dut_testpmd.wait_link_status_up(self.dut_ports[0]) + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port config all rss %s" % flowtype) + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") + self.sut_testpmd.wait_link_status_up(self.sut_ports[0]) self.send_and_verify(flowtype, qmin, qmax, keyword) def test_outer_dst_contrl_gtpcq(self): @@ -639,19 +639,19 @@ class TestDdpGtpQregion(TestCase): self.run_gtp_test(crlwords, 23, 23, 10, 25, "dst_ipv6_32pre") def tear_down(self): - self.dut_testpmd.execute_cmd("stop") - self.dut_testpmd.execute_cmd("set port 0 queue-region flush off") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("stop") + self.sut_testpmd.execute_cmd("set port 0 queue-region flush off") + out = self.sut_testpmd.execute_cmd("ddp get list 0") if "Profile number is: 0" not in out: - self.dut_testpmd.execute_cmd("port stop all") + 
self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd("ddp del 0 /tmp/gtp.bak") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp del 0 /tmp/gtp.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify( "Profile number is: 0" in out, "Failed to delete ddp profile!!!" ) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.quit() + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.quit() def tear_down_all(self): pass diff --git a/tests/TestSuite_ddp_l2tpv3.py b/tests/TestSuite_ddp_l2tpv3.py index ef50824b..ea4ccea8 100644 --- a/tests/TestSuite_ddp_l2tpv3.py +++ b/tests/TestSuite_ddp_l2tpv3.py @@ -18,13 +18,13 @@ from framework.test_case import TestCase class TestDdpL2tpv3(TestCase): def set_up_all(self): - self.dut.session.copy_file_to("dep/l2tpv3oip-l4.pkg", "/tmp/") - self.dut_testpmd = PmdOutput(self.dut) - self.dut_ports = self.dut.get_ports(self.nic) - self.used_dut_port = self.dut_ports[0] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) - out = self.dut.send_expect("cat config/rte_config.h", "]# ", 10) + self.sut_node.session.copy_file_to("dep/l2tpv3oip-l4.pkg", "/tmp/") + self.sut_testpmd = PmdOutput(self.sut_node) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.used_sut_port = self.sut_ports[0] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) + out = self.sut_node.send_expect("cat config/rte_config.h", "]# ", 10) self.PF_Q_strip = "RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF" pattern = "define (%s) (\d*)" % self.PF_Q_strip self.PF_QUEUE = self.element_strip(out, pattern) @@ -51,20 +51,20 @@ class TestDdpL2tpv3(TestCase): Load profile to update Intel® Ethernet 700 Series configuration tables, profile will be stored in binary file. 
""" - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "Default", "--pkt-filter-mode=perfect --port-topology=chained \ --txq=%s --rxq=%s --disable-rss" % (self.PF_QUEUE, self.PF_QUEUE), ) - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "ddp add 0 /tmp/l2tpv3oip-l4.pkg,/tmp/l2tpv3oip-l4.bak" ) - out = self.dut_testpmd.execute_cmd("ddp get list 0") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify("Profile number is: 1" in out, "Failed to load ddp profile!!!") - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port start all") time.sleep(3) def l2tpv3pkts(self, keyword): @@ -236,11 +236,11 @@ class TestDdpL2tpv3(TestCase): pkt = self.l2tpv3pkts(keyword) qnum = [] for i in range(len(pkt)): - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkt[i], self.tester_intf) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkt[i], self.tg_intf) ) - self.tester.scapy_execute() - out = self.dut.get_session_output(timeout=2) + self.tg_node.scapy_execute() + out = self.sut_node.get_session_output(timeout=2) pattern = "port (\d)/queue (\d{1,2}): received (\d) packets" qnum.append(self.element_strip(out, pattern)) return qnum @@ -251,22 +251,22 @@ class TestDdpL2tpv3(TestCase): keywords: keywords have IPv4/IPv6 SIP DIP and UDP """ - self.dut_testpmd.execute_cmd("port stop all") - self.dut_testpmd.execute_cmd("port config 0 pctype 28 fdir_inset clear all") - self.dut_testpmd.execute_cmd("port config 0 pctype 38 fdir_inset clear all") + self.sut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port config 0 pctype 28 fdir_inset clear all") + self.sut_testpmd.execute_cmd("port config 0 pctype 38 fdir_inset clear all") if crlwords_ipv4 is not None: for field in crlwords_ipv4: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype 28 
fdir_inset set field {}".format(field) ) if crlwords_ipv6 is not None: for field in crlwords_ipv6: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype 38 fdir_inset set field {}".format(field) ) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.execute_cmd("start") - self.dut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("set verbose 1") qdef = [] qnum = self.send_and_verify(keyword) for i in range(len(qnum)): @@ -277,7 +277,7 @@ class TestDdpL2tpv3(TestCase): queue = random.randint(1, self.PF_QUEUE - 1) if "l2tpv3oipv4" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -290,7 +290,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching sessionID if "l2tpv3oipv4_dst" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 dst is 8.8.8.8 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -306,7 +306,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching DIP if "l2tpv3oipv4_src" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 src is 1.1.1.1 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -322,7 +322,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching SIP if "l2tpv3oipv4_src_dst" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 src is 5.5.5.5 dst is 2.2.2.2 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -341,7 +341,7 @@ 
class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching DIP if "l2tpv3oipv6" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv6 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -354,7 +354,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching sessionID if "l2tpv3oipv6_dst" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv6 dst is 8:7:6:5:4:3:2:1 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -370,7 +370,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching DIP if "l2tpv3oipv6_src" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv6 src is 1:2:3:4:5:6:7:8 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -386,7 +386,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching SIP if "l2tpv3oipv6_src_dst" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv6 src is 2:3:4:5:6:7:8:9 dst is 6:5:4:3:2:1:8:9 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -405,7 +405,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching DIP if "l2tpv3_ipv4_ipv6" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -417,7 +417,7 @@ class TestDdpL2tpv3(TestCase): 0 ) # Default Queue number to check for in case of non matching sessionID queue = random.randint(1, self.PF_QUEUE - 1) - self.dut_testpmd.execute_cmd( + 
self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv6 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -430,7 +430,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching sessionID if "l2tpv3oip_v4src_v6src" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 src is 1.3.5.7 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -445,7 +445,7 @@ class TestDdpL2tpv3(TestCase): 0 ) # Default Queue number to check for in case of non matching IPv4 SIP queue = random.randint(1, self.PF_QUEUE - 1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv6 src is 1:3:5:7:9:2:4:6 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -461,7 +461,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching IPv6 SIP if "l2tpv3oip_v4dst_v6dst" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 dst is 9.7.5.3 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -476,7 +476,7 @@ class TestDdpL2tpv3(TestCase): 0 ) # Default Queue number to check for in case of non matching IPv4 DIP queue = random.randint(1, self.PF_QUEUE - 1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv6 dst is 2:4:6:8:1:3:5:7 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -492,7 +492,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching IPv6 DIP if "l2tpv3oip_v4srcdst_v6srcdst" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 src is 9.8.7.6 dst is 4.5.6.7 / l2tpv3oip session_id is 1001 / end actions 
queue index {} / end".format( queue ) @@ -510,7 +510,7 @@ class TestDdpL2tpv3(TestCase): 0 ) # Default Queue number to check for in case of non matching IPv4 DIP queue = random.randint(1, self.PF_QUEUE - 1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv6 src is 1:2:3:4:5:6:7:8 dst is 9:8:7:6:5:4:3:2 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -529,7 +529,7 @@ class TestDdpL2tpv3(TestCase): ) # Default Queue number to check for in case of non matching IPv6 DIP if "l2tpv3oip_v4_v6_udp" is keyword: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -541,7 +541,7 @@ class TestDdpL2tpv3(TestCase): 0 ) # Default Queue number to check for in case of non matching sessionID queue = random.randint(1, self.PF_QUEUE - 1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv6 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) @@ -553,7 +553,7 @@ class TestDdpL2tpv3(TestCase): 0 ) # Default Queue number to check for in case of non matching sessionID queue = random.randint(1, self.PF_QUEUE - 1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / udp / end actions queue index {} / end".format( queue ) @@ -566,7 +566,7 @@ class TestDdpL2tpv3(TestCase): qdef[i] == qnum[i], "Receive packet from wrong queue{}_{}!!!".format(qdef[i], qnum[i]), ) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow flush 0" ) # Delete all the flow director rules @@ -697,16 +697,16 @@ class TestDdpL2tpv3(TestCase): Step1: Check if it is already loaded, if loaded delete the same Step2: Load the profile """ - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") - out = 
self.dut_testpmd.execute_cmd("ddp get list 0") + out = self.sut_testpmd.execute_cmd("ddp get list 0") if "L2TPv3oIP with L4 payload" in out: print("Profile is already loaded!!") - out = self.dut_testpmd.execute_cmd("ddp del 0 /tmp/l2tpv3oip-l4.bak") - out = self.dut_testpmd.execute_cmd( + out = self.sut_testpmd.execute_cmd("ddp del 0 /tmp/l2tpv3oip-l4.bak") + out = self.sut_testpmd.execute_cmd( "ddp add 0 /tmp/l2tpv3oip-l4.pkg,/tmp/l2tpv3oip-l4.bak" ) - out = self.dut_testpmd.execute_cmd("ddp get list 0") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify("L2TPv3oIP with L4 payload" in out, "Failed to Load DDP profile ") def test_l2tpv3oip_delete_profile(self): @@ -715,24 +715,24 @@ class TestDdpL2tpv3(TestCase): Step1: Check if profile is loaded, if loaded, delete the same Step2: Add the profile again """ - self.dut_testpmd.execute_cmd("port stop all") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("port stop all") + out = self.sut_testpmd.execute_cmd("ddp get list 0") if "L2TPv3oIP with L4 payload" not in out: - out = self.dut_testpmd.execute_cmd( + out = self.sut_testpmd.execute_cmd( "ddp add 0 /tmp/l2tpv3oip-l4.pkg,/tmp/l2tpv3oip-l4.bak" ) - out = self.dut_testpmd.execute_cmd("ddp get list 0") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify( "L2TPv3oIP with L4 payload" in out, "Error in loading the Profile" ) - self.dut_testpmd.execute_cmd("ddp del 0 /tmp/l2tpv3oip-l4.bak") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp del 0 /tmp/l2tpv3oip-l4.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify("Profile number is: 0" in out, "Error in @@deleting the Profile !!") - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "ddp add 0 /tmp/l2tpv3oip-l4.pkg,/tmp/l2tpv3oip-l4.bak" ) - out = self.dut_testpmd.execute_cmd("ddp get list 0") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify("L2TPv3oIP with L4 
payload" in out, "Error in loading the Profile") def test_l2tpv3oip_delete_rules(self): @@ -743,46 +743,46 @@ class TestDdpL2tpv3(TestCase): Step3: Flush all rules """ queue = random.randint(1, self.PF_QUEUE - 1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1001 / end actions queue index {} / end".format( queue ) ) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1002 / end actions queue index {} / end".format( queue ) ) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1003 / end actions queue index {} / end".format( queue ) ) - out = self.dut_testpmd.execute_cmd("flow list 0") + out = self.sut_testpmd.execute_cmd("flow list 0") verify = out.splitlines() self.verify(len(verify) == 6, "Flow rules not added") - self.dut_testpmd.execute_cmd("flow destroy 0 rule 0") - out = self.dut_testpmd.execute_cmd("flow list 0") + self.sut_testpmd.execute_cmd("flow destroy 0 rule 0") + out = self.sut_testpmd.execute_cmd("flow list 0") verify = out.splitlines() self.verify(len(verify) == 5, "Flow rules not destroyed") - self.dut_testpmd.execute_cmd("flow flush 0") - out = self.dut_testpmd.execute_cmd("flow list 0") + self.sut_testpmd.execute_cmd("flow flush 0") + out = self.sut_testpmd.execute_cmd("flow list 0") verify = out.splitlines() self.verify(len(verify) == 1, "Flow rules not destroyed") def tear_down(self): - self.dut_testpmd.execute_cmd("stop") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("stop") + out = self.sut_testpmd.execute_cmd("ddp get list 0") if "Profile number is: 0" not in out: - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd("ddp del 0 /tmp/l2tpv3oip-l4.bak") - out = 
self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp del 0 /tmp/l2tpv3oip-l4.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify( "Profile number is: 0" in out, "Failed to delete ddp profile!!!" ) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.quit() + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.quit() def tear_down_all(self): pass diff --git a/tests/TestSuite_ddp_mpls.py b/tests/TestSuite_ddp_mpls.py index f0d4ef7d..bb9f7b8c 100644 --- a/tests/TestSuite_ddp_mpls.py +++ b/tests/TestSuite_ddp_mpls.py @@ -26,13 +26,13 @@ class Testddp_mpls(TestCase): self.verify( self.is_eth_series_nic(700), "ddp mpls can not support %s nic" % self.nic ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.vm0 = None self.env_done = False profile_file = r"dep/mpls.pkgo" profile_dst = "/tmp/" - self.dut.session.copy_file_to(profile_file, profile_dst) + self.sut_node.session.copy_file_to(profile_file, profile_dst) self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: @@ -42,7 +42,7 @@ class Testddp_mpls(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") def set_up(self): self.setup_vm_env() @@ -52,36 +52,36 @@ class Testddp_mpls(TestCase): Create testing environment with VF generated from 1PF """ if self.env_done == False: - self.bind_nic_driver(self.dut_ports[:1], driver="igb_uio") - self.used_dut_port = self.dut_ports[0] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) + self.bind_nic_driver(self.sut_ports[:1], driver="igb_uio") + self.used_sut_port = self.sut_ports[0] + tg_port = 
self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) time.sleep(1) - self.dut_testpmd = PmdOutput(self.dut) + self.sut_testpmd = PmdOutput(self.sut_node) time.sleep(1) vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} # set up VM0 ENV - self.vm0 = QEMUKvm(self.dut, "vm0", "ddp_mpls") + self.vm0 = QEMUKvm(self.sut_node, "vm0", "ddp_mpls") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) try: - self.vm0_dut = self.vm0.start() - if self.vm0_dut is None: + self.vm0_sut = self.vm0.start() + if self.vm0_sut is None: raise Exception("Set up VM0 ENV failed!") except Exception as e: self.destroy_vm_env() raise Exception(e) - self.vm0_dut_ports = self.vm0_dut.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm0_dut) + self.vm0_sut_ports = self.vm0_sut.get_ports("any") + self.vm0_testpmd = PmdOutput(self.vm0_sut) self.env_done = True - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "Default", "--port-topology=chained --txq=%s --rxq=%s" % (PF_MAX_QUEUE, PF_MAX_QUEUE), ) @@ -94,17 +94,17 @@ class Testddp_mpls(TestCase): def destroy_vm_env(self): if getattr(self, "vm0", None): - self.vm0_dut.kill_all() + self.vm0_sut.kill_all() self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() self.vm0 = None - if getattr(self, "used_dut_port", None): - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] - self.used_dut_port = None + if getattr(self, "used_sut_port", None): + 
self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] + self.used_sut_port = None self.env_done = False @@ -114,29 +114,29 @@ class Testddp_mpls(TestCase): profile will be stored in binary file and need to be passed to AQ to program Intel® Ethernet 700 Series during initialization stage. """ - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - out = self.dut_testpmd.execute_cmd("ddp get list 0") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify( "Profile number is: 0" in out, "Failed to get ddp profile info list!!!" ) - self.dut_testpmd.execute_cmd("ddp add 0 /tmp/mpls.pkgo,/tmp/mpls.bak") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp add 0 /tmp/mpls.pkgo,/tmp/mpls.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify("Profile number is: 1" in out, "Failed to load ddp profile!!!") - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port start all") time.sleep(1) def mpls_test(self, port="pf", pkt="udp"): """ - Send mpls packet to dut, receive packet from configured queue. + Send mpls packet to SUT, receive packet from configured queue. 
Input: port type, packet type """ pkts = [] if port == "pf": queue = random.randint(1, PF_MAX_QUEUE - 1) - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") else: queue = random.randint(1, VF_MAX_QUEUE - 1) self.vm0_testpmd.execute_cmd("set fwd rxonly") @@ -145,7 +145,7 @@ class Testddp_mpls(TestCase): random_label = random.randint(0x0, 0xFFFFF) label = hex(random_label) wrong_label = hex((random_label + 2) % int(0xFFFFF)) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4\ / %s / mpls label is %s / end actions %s / queue index %d / end" % (pkt, label, port, queue) @@ -170,14 +170,14 @@ class Testddp_mpls(TestCase): % label, } for packet_type in list(pkts.keys()): - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts[packet_type], self.tester_intf) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts[packet_type], self.tg_intf) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() if port == "pf": - out = self.dut.get_session_output(timeout=2) + out = self.sut_node.get_session_output(timeout=2) else: - out = self.vm0_dut.get_session_output(timeout=2) + out = self.vm0_sut.get_session_output(timeout=2) self.verify( "port 0/queue %d" % queue in out, @@ -229,19 +229,19 @@ class Testddp_mpls(TestCase): def tear_down(self): self.vm0_testpmd.execute_cmd("stop") - self.dut_testpmd.execute_cmd("stop") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("stop") + out = self.sut_testpmd.execute_cmd("ddp get list 0") if "Profile number is: 0" not in out: - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd("ddp del 0 /tmp/mpls.bak") - out = 
self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp del 0 /tmp/mpls.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify( "Profile number is: 0" in out, "Failed to delete mpls profile!!!" ) - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port start all") self.vm0_testpmd.quit() - self.dut_testpmd.quit() + self.sut_testpmd.quit() def tear_down_all(self): self.destroy_vm_env() diff --git a/tests/TestSuite_ddp_ppp_l2tp.py b/tests/TestSuite_ddp_ppp_l2tp.py index c0890a48..50667b91 100644 --- a/tests/TestSuite_ddp_ppp_l2tp.py +++ b/tests/TestSuite_ddp_ppp_l2tp.py @@ -17,19 +17,19 @@ from framework.test_case import TestCase class TestDdpPppL2tp(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") profile_file = "dep/ppp-oe-ol2tpv2.pkgo" profile_dst = "/tmp/" - self.dut.session.copy_file_to(profile_file, profile_dst) - out = self.dut.send_expect("cat config/rte_config.h", "]# ", 10) + self.sut_node.session.copy_file_to(profile_file, profile_dst) + out = self.sut_node.send_expect("cat config/rte_config.h", "]# ", 10) self.PF_Q_strip = "RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF" pattern = "define (%s) (\d*)" % self.PF_Q_strip self.PF_QUEUE = self.element_strip(out, pattern) - self.used_dut_port = self.dut_ports[0] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) - self.dut_testpmd = PmdOutput(self.dut) + self.used_sut_port = self.sut_ports[0] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) + self.sut_testpmd = PmdOutput(self.sut_node) def set_up(self): self.load_profile() @@ -53,20 +53,20 @@ class TestDdpPppL2tp(TestCase): profile will be stored in binary file 
and need to be passed to AQ to program Intel® Ethernet 700 Series during initialization stage. """ - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "Default", "--pkt-filter-mode=perfect --port-topology=chained \ --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE), ) - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "ddp add 0 /tmp/ppp-oe-ol2tpv2.pkgo,/tmp/ppp-oe-ol2tpv2.bak" ) - out = self.dut_testpmd.execute_cmd("ddp get list 0") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify("Profile number is: 1" in out, "Failed to load ddp profile!!!") - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port start all") def ppp_l2tp_pkts(self, flowtype, keyword): """ @@ -199,7 +199,7 @@ class TestDdpPppL2tp(TestCase): File.write(ba) File.close() rawfile_dst = "/tmp/" - self.dut.session.copy_file_to(rawfile_src, rawfile_dst) + self.sut_node.session.copy_file_to(rawfile_src, rawfile_dst) def send_and_verify(self, flowtype, keyword="def", type="rss"): """ @@ -207,11 +207,11 @@ class TestDdpPppL2tp(TestCase): """ pkts = self.ppp_l2tp_pkts(flowtype, keyword) for packet_type in list(pkts.keys()): - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts[packet_type], self.tester_intf) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts[packet_type], self.tg_intf) ) - self.tester.scapy_execute() - out = self.dut.get_session_output(timeout=2) + self.tg_node.scapy_execute() + out = self.sut_node.get_session_output(timeout=2) print(out) if type is "rss": self.verify("RTE_MBUF_F_RX_RSS_HASH" in out, "Failed to test RSS!!!") @@ -233,8 +233,8 @@ class TestDdpPppL2tp(TestCase): """ dynamic flowtype/pctype mapping for new protocol. 
""" - self.dut_testpmd.execute_cmd("port config 0 pctype mapping reset") - out = self.dut_testpmd.execute_cmd("show port 0 pctype mapping") + self.sut_testpmd.execute_cmd("port config 0 pctype mapping reset") + out = self.sut_testpmd.execute_cmd("show port 0 pctype mapping") self.verify( "pctype: 63 -> flowtype: 14" in out, "Failed show flow type to pctype mapping!!!", @@ -243,16 +243,16 @@ class TestDdpPppL2tp(TestCase): "pctype: %s -> flowtype: %s" % (pctype, flowtype) not in out, "Failed show flow type to pctype mapping!!!", ) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype mapping update %s %s" % (pctype, flowtype) ) - out = self.dut_testpmd.execute_cmd("show port 0 pctype mapping") + out = self.sut_testpmd.execute_cmd("show port 0 pctype mapping") self.verify( "pctype: %s -> flowtype: %s" % (pctype, flowtype) in out, "Failed update flow type to pctype mapping!!!", ) - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") def run_rss_test(self, crlwords, flowtype, pctype, keywords, qchecks): """ @@ -273,18 +273,18 @@ class TestDdpPppL2tp(TestCase): self.pctype_flowtype_mapping(flowtype, pctype) if crlwords is not None: - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype %s hash_inset clear all" % pctype ) for word in crlwords: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype %s hash_inset set field %s" % (pctype, word) ) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.execute_cmd("port config all rss %s" % flowtype) - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port config all rss %s" % flowtype) + 
self.sut_testpmd.execute_cmd("start") qnum = self.send_and_verify(flowtype, "def", "rss") qdef = qnum for word, chk in zip(keywords, qchecks): @@ -314,22 +314,22 @@ class TestDdpPppL2tp(TestCase): self.pctype_flowtype_mapping(flowtype, pctype) if crlwords is not None: - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype %s fdir_inset clear all" % pctype ) for word in crlwords: - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype %s fdir_inset set field %s" % (pctype, word) ) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("start") qnum = self.send_and_verify(flowtype, "def", "fd") self.verify(qnum == 0, "Receive packet from wrong queue!!!") self.raw_packet_generate(flowtype) queue = random.randint(1, self.PF_QUEUE - 1) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "flow_director_filter 0 mode raw add flow %d fwd queue %d \ fd_id 1 packet /tmp/test.raw" % (flowtype, queue) @@ -588,18 +588,18 @@ class TestDdpPppL2tp(TestCase): self.run_fd_test(crlwords, 24, 19, keywords, qchecks) def tear_down(self): - self.dut_testpmd.execute_cmd("stop") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("stop") + out = self.sut_testpmd.execute_cmd("ddp get list 0") if "Profile number is: 0" not in out: - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd("ddp del 0 /tmp/ppp-oe-ol2tpv2.bak") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp del 0 /tmp/ppp-oe-ol2tpv2.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify( "Profile number is: 0" in out, "Failed to delete ddp profile!!!" 
) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.quit() + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.quit() def tear_down_all(self): pass diff --git a/tests/TestSuite_distributor.py b/tests/TestSuite_distributor.py index 7b41eb01..4216eb97 100644 --- a/tests/TestSuite_distributor.py +++ b/tests/TestSuite_distributor.py @@ -9,8 +9,8 @@ import os import re import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestDistributor(TestCase): @@ -18,13 +18,13 @@ class TestDistributor(TestCase): """ Run at the start of each test suite. """ - out = self.dut.build_dpdk_apps("./examples/distributor") + out = self.sut_node.build_dpdk_apps("./examples/distributor") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") - self.dut_ports = self.dut.get_ports() - self.app_distributor_path = self.dut.apps_name["distributor"] - self.app_test_path = self.dut.apps_name["test"] + self.sut_ports = self.sut_node.get_ports() + self.app_distributor_path = self.sut_node.apps_name["distributor"] + self.app_test_path = self.sut_node.apps_name["test"] # get dts output path if self.logger.log_path.startswith(os.sep): self.output_path = self.logger.log_path @@ -32,7 +32,7 @@ class TestDistributor(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -44,26 +44,26 @@ class TestDistributor(TestCase): """ Run distributor unit test """ - eal_para = self.dut.create_eal_parameters(cores=[0, 1, 2, 3]) - self.dut.send_expect("./%s %s" % (self.app_test_path, eal_para), "RTE>>", 60) - out = 
self.dut.send_expect("distributor_autotest", "RTE>>", 30) - self.dut.send_expect("quit", "# ") + eal_para = self.sut_node.create_eal_parameters(cores=[0, 1, 2, 3]) + self.sut_node.send_expect("./%s %s" % (self.app_test_path, eal_para), "RTE>>", 60) + out = self.sut_node.send_expect("distributor_autotest", "RTE>>", 30) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_distributor_unit_perf(self): """ Run distributor unit perf test """ - eal_para = self.dut.create_eal_parameters(cores=[0, 1, 2, 3]) - self.dut.send_expect("./%s %s" % (self.app_test_path, eal_para), "RTE>>", 60) - out = self.dut.send_expect("distributor_perf_autotest", "RTE>>", 120) + eal_para = self.sut_node.create_eal_parameters(cores=[0, 1, 2, 3]) + self.sut_node.send_expect("./%s %s" % (self.app_test_path, eal_para), "RTE>>", 60) + out = self.sut_node.send_expect("distributor_perf_autotest", "RTE>>", 120) cycles_single = self.strip_cycles(out, "single") cycles_burst = self.strip_cycles(out, "burst") self.logger.info( "Cycles for single mode is %d burst mode is %d" % (cycles_single, cycles_burst) ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") self.verify( cycles_single > cycles_burst * 2, "Burst performance should be much better" @@ -73,7 +73,7 @@ class TestDistributor(TestCase): """ Run distributor perf test, recorded statistic of Rx/Enqueue/Sent/Dequeue/Tx """ - self.verify(len(self.dut_ports) >= 1, "Not enough ports") + self.verify(len(self.sut_ports) >= 1, "Not enough ports") workers = [1, 2, 3, 4, 8, 16, 32] table_header = [ "Number of workers", @@ -88,14 +88,14 @@ class TestDistributor(TestCase): # output port is calculated from overall ports number cmd_fmt = "%s %s -- -p 0x1" - socket = self.dut.get_numa_id(self.dut_ports[0]) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) pcap = os.sep.join([self.output_path, "distributor.pcap"]) - 
self.tester.scapy_append('wrpcap("%s", [Ether()/IP()/("X"*26)])' % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", [Ether()/IP()/("X"*26)])' % pcap) + self.tg_node.scapy_execute() tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) pcap = os.sep.join([self.output_path, "distributor.pcap"]) tgen_input.append((tx_port, rx_port, pcap)) @@ -103,27 +103,27 @@ class TestDistributor(TestCase): self.result_table_create(table_header) for worker_num in workers: # Rx core/distributor core/Tx core/stats core - cores = self.dut.get_core_list("1S/%dC/1T" % (worker_num + 4), socket) + cores = self.sut_node.get_core_list("1S/%dC/1T" % (worker_num + 4), socket) # If can't get enough core from one socket, just use all lcores if len(cores) < (worker_num + 4): cores = self._get_thread_lcore(worker_num + 4) - eal_para = self.dut.create_eal_parameters(cores=cores, ports=[0]) + eal_para = self.sut_node.create_eal_parameters(cores=cores, ports=[0]) cmd = cmd_fmt % (self.app_distributor_path, eal_para) - self.dut.send_expect(cmd, "doing packet RX", timeout=30) + self.sut_node.send_expect(cmd, "doing packet RX", timeout=30) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) # get aap output after sending packet - self.app_output = self.dut.session.get_session_before(timeout=2) + self.app_output = self.sut_node.session.get_session_before(timeout=2) - 
self.dut.send_expect("^C", "#") + self.sut_node.send_expect("^C", "#") pps /= 1000000.0 rx, enq, sent, deq, trans = self.strip_performance_data(self.app_output) @@ -138,10 +138,10 @@ class TestDistributor(TestCase): """ Check distributor app work fine with maximum workers """ - self.verify(len(self.dut_ports) >= 1, "Not enough ports") + self.verify(len(self.sut_ports) >= 1, "Not enough ports") cmd_fmt = "%s %s -- -p 0x1" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "sed -n '/#define RTE_DISTRIB_MAX_WORKERS/p' lib/distributor/distributor_private.h", "# ", trim_whitespace=False, @@ -152,49 +152,49 @@ class TestDistributor(TestCase): max_workers = int(m.group(1)) cores = self._get_thread_lcore(max_workers - 1 + 4) - eal_para = self.dut.create_eal_parameters(cores=cores, ports=[0]) + eal_para = self.sut_node.create_eal_parameters(cores=cores, ports=[0]) cmd = cmd_fmt % (self.app_distributor_path, eal_para) - self.dut.send_expect(cmd, "doing packet RX", timeout=30) + self.sut_node.send_expect(cmd, "doing packet RX", timeout=30) - tx_port = self.tester.get_local_port(self.dut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) tgen_input = [(tx_port, tx_port)] - self.tester.check_random_pkts(tgen_input, pktnum=256, seq_check=True) + self.tg_node.check_random_pkts(tgen_input, pktnum=256, seq_check=True) - self.dut.send_expect("^C", "#") + self.sut_node.send_expect("^C", "#") def test_multiple_ports(self): """ Check distributor app work fine with multiple ports """ - self.verify(len(self.dut_ports) >= 2, "Not enough ports") + self.verify(len(self.sut_ports) >= 2, "Not enough ports") cmd_fmt = "%s %s -- -p 0x3" - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/%dC/1T" % (2 + 4), socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/%dC/1T" % (2 + 4), socket) - eal_para = self.dut.create_eal_parameters(cores=cores, ports=[0, 1]) + eal_para = 
self.sut_node.create_eal_parameters(cores=cores, ports=[0, 1]) cmd = cmd_fmt % (self.app_distributor_path, eal_para) - self.dut.send_expect(cmd, "doing packet RX", timeout=30) + self.sut_node.send_expect(cmd, "doing packet RX", timeout=30) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) tgen_input = [(tx_port, rx_port)] - self.tester.check_random_pkts(tgen_input, pktnum=256, seq_check=True) + self.tg_node.check_random_pkts(tgen_input, pktnum=256, seq_check=True) tgen_input = [(rx_port, tx_port)] - self.tester.check_random_pkts(tgen_input, pktnum=256, seq_check=True) + self.tg_node.check_random_pkts(tgen_input, pktnum=256, seq_check=True) - self.dut.send_expect("^C", "#") + self.sut_node.send_expect("^C", "#") def _get_thread_lcore(self, core_num): def strip_core(x): return int(x["thread"]) - cores = list(map(strip_core, self.dut.cores[0:core_num])) + cores = list(map(strip_core, self.sut_node.cores[0:core_num])) return cores def hook_transmission_func(self): - self.app_output = self.dut.session.get_session_before(timeout=2) + self.app_output = self.sut_node.session.get_session_before(timeout=2) def strip_performance_data(self, output=""): """ diff --git a/tests/TestSuite_dpdk_gro_lib.py b/tests/TestSuite_dpdk_gro_lib.py index 0c75334b..76edb6ae 100644 --- a/tests/TestSuite_dpdk_gro_lib.py +++ b/tests/TestSuite_dpdk_gro_lib.py @@ -22,15 +22,15 @@ class TestDPDKGROLib(TestCase): def set_up_all(self): # This suite will not use the port config in ports.cfg # it will use the port config in vhost_peer_conf.cfg - # And it need two interface reconnet in DUT + # And it need two interface reconnet in SUT # unbind the port which config in ports.cfg - self.dut_ports = self.dut.get_ports() - self.def_driver = self.dut.ports_info[self.dut_ports[0]][ + self.sut_ports = self.sut_node.get_ports() + 
self.def_driver = self.sut_node.ports_info[self.sut_ports[0]][ "port" ].get_nic_driver() - for i in self.dut_ports: - port = self.dut.ports_info[i]["port"] + for i in self.sut_ports: + port = self.sut_node.ports_info[i]["port"] port.bind_driver() # get and bind the port in config file self.pci = peer.get_pci_info() @@ -44,11 +44,11 @@ class TestDPDKGROLib(TestCase): and len(self.nic_in_kernel) != 0, "Pls config the direct connection info in vhost_peer_conf.cfg", ) - bind_script_path = self.dut.get_dpdk_bind_script() - self.dut.send_expect( + bind_script_path = self.sut_node.get_dpdk_bind_script() + self.sut_node.send_expect( "%s --bind=%s %s" % (bind_script_path, self.def_driver, self.pci), "# " ) - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] # get the numa info about the pci info which config in peer cfg @@ -60,24 +60,24 @@ class TestDPDKGROLib(TestCase): # get core list on this socket, 2 cores for testpmd, 1 core for qemu cores_config = "1S/3C/1T" self.verify( - self.dut.number_of_cores >= 3, + self.sut_node.number_of_cores >= 3, "There has not enought cores to test this case %s" % self.suite_name, ) - cores_list = self.dut.get_core_list("1S/3C/1T", socket=self.socket) + cores_list = self.sut_node.get_core_list("1S/3C/1T", socket=self.socket) self.vhost_list = cores_list[0:2] self.qemu_cpupin = cores_list[2:3][0] # Set the params for VM self.virtio_ip1 = "1.1.1.2" self.virtio_mac1 = "52:54:00:00:00:01" - self.memory_channel = self.dut.get_memory_channels() - if len(set([int(core["socket"]) for core in self.dut.cores])) == 1: + self.memory_channel = self.sut_node.get_memory_channels() + if len(set([int(core["socket"]) for core in self.sut_node.cores])) == 1: self.socket_mem = "1024" else: self.socket_mem = "1024,1024" self.prepare_dpdk() - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.base_dir 
= self.sut_node.base_dir.replace("~", "/root") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) # get cbdma device self.cbdma_dev_infos = [] self.dmas_info = None @@ -88,15 +88,15 @@ class TestDPDKGROLib(TestCase): # Run before each test case. # # Clean the execution ENV - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -125,7 +125,7 @@ class TestDPDKGROLib(TestCase): dmas_info += dmas self.dmas_info = dmas_info[:-1] self.device_str = " ".join(self.cbdma_dev_infos) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -134,11 +134,11 @@ class TestDPDKGROLib(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -155,7 +155,7 @@ class TestDPDKGROLib(TestCase): # mode 5 : tcp traffice light mode with cdbma enable if mode == 5: self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2) - eal_param = self.dut.create_eal_parameters( + eal_param = 
self.sut_node.create_eal_parameters( cores=self.vhost_list, vdevs=[ "'net_vhost0,iface=%s/vhost-net,queues=%s,dmas=[%s]'" @@ -165,10 +165,10 @@ class TestDPDKGROLib(TestCase): self.testcmd_start = ( self.path + eal_param + " -- -i --txd=1024 --rxd=1024 --txq=2 --rxq=2" ) - self.vhost_user = self.dut.new_session(suite="user") + self.vhost_user = self.sut_node.new_session(suite="user") self.vhost_user.send_expect(self.testcmd_start, "testpmd> ", 120) else: - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( cores=self.vhost_list, vdevs=[ "net_vhost0,iface=%s/vhost-net,queues=%s" % (self.base_dir, queue) @@ -180,7 +180,7 @@ class TestDPDKGROLib(TestCase): + eal_param + " -- -i --enable-hw-vlan-strip --tx-offloads=0x00 --txd=1024 --rxd=1024" ) - self.vhost_user = self.dut.new_session(suite="user") + self.vhost_user = self.sut_node.new_session(suite="user") self.vhost_user.send_expect(self.testcmd_start, "testpmd> ", 120) self.set_testpmd_params() @@ -224,54 +224,54 @@ class TestDPDKGROLib(TestCase): def quit_testpmd(self): # Quit testpmd and close temp ssh session self.vhost_user.send_expect("quit", "#", 120) - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) def config_kernel_nic_host(self, mode=1): if mode == 0: - self.dut.send_expect("ip netns del ns1", "#") - self.dut.send_expect("ip netns add ns1", "#") - self.dut.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "#") + self.sut_node.send_expect("ip netns add ns1", "#") + self.sut_node.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") + self.sut_node.send_expect( "ip netns exec ns1 ifconfig %s 1.1.1.8 up" % self.nic_in_kernel, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 ethtool -K %s tso on" % self.nic_in_kernel, "#" ) if mode == 1: - self.dut.send_expect("ip netns del ns1", "#") - 
self.dut.send_expect("ip netns add ns1", "#") - self.dut.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "#") + self.sut_node.send_expect("ip netns add ns1", "#") + self.sut_node.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") + self.sut_node.send_expect( "ip netns exec ns1 ifconfig %s 1.1.2.4/24 up" % self.nic_in_kernel, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 ip link add vxlan1 type vxlan id 42 dev %s dstport 4789" % self.nic_in_kernel, "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 bridge fdb append to 00:00:00:00:00:00 dst 1.1.2.3 dev vxlan1", "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 ip addr add 50.1.1.1/24 dev vxlan1", "#" ) - self.dut.send_expect("ip netns exec ns1 ip link set up dev vxlan1", "#") + self.sut_node.send_expect("ip netns exec ns1 ip link set up dev vxlan1", "#") def prepare_dpdk(self): # # Changhe the testpmd checksum fwd code for mac change - self.dut.send_expect( + self.sut_node.send_expect( "cp ./app/test-pmd/csumonly.c ./app/test-pmd/csumonly_backup.c", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "cp ./drivers/net/vhost/rte_eth_vhost.c ./drivers/net/vhost/rte_eth_vhost-backup.c", "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/ether_addr_copy(&peer_eth/i\#if 0' ./app/test-pmd/csumonly.c", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/parse_ethernet(eth_hdr, &info/i\#endif' ./app/test-pmd/csumonly.c", "#", ) @@ -290,30 +290,30 @@ class TestDPDKGROLib(TestCase): + "DEV_RX_OFFLOAD_IPV4_CKSUM | " + "DEV_RX_OFFLOAD_TCP_LRO;" ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i 's/DEV_TX_OFFLOAD_VLAN_INSERT;/%s/' drivers/net/vhost/rte_eth_vhost.c" % tx_offload, "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i 's/DEV_RX_OFFLOAD_VLAN_STRIP;/%s/' 
drivers/net/vhost/rte_eth_vhost.c" % rx_offload, "#", ) - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.build_install_dpdk(self.sut_node.target) def unprepare_dpdk(self): # Recovery the DPDK code to original - self.dut.send_expect( + self.sut_node.send_expect( "cp ./app/test-pmd/csumonly_backup.c ./app/test-pmd/csumonly.c ", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "cp ./drivers/net/vhost/rte_eth_vhost-backup.c ./drivers/net/vhost/rte_eth_vhost.c ", "#", ) - self.dut.send_expect("rm -rf ./app/test-pmd/csumonly_backup.c", "#") - self.dut.send_expect("rm -rf ./drivers/net/vhost/rte_eth_vhost-backup.c", "#") - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.send_expect("rm -rf ./app/test-pmd/csumonly_backup.c", "#") + self.sut_node.send_expect("rm -rf ./drivers/net/vhost/rte_eth_vhost-backup.c", "#") + self.sut_node.build_install_dpdk(self.sut_node.target) def set_vm_cpu_number(self, vm_config): # config the vcpu numbers = 1 @@ -325,7 +325,7 @@ class TestDPDKGROLib(TestCase): vm_config.params[i]["cpu"][0]["cpupin"] = self.qemu_cpupin def start_vm(self, mode=1, queue=1): - self.vm1 = VM(self.dut, "vm0", "vhost_sample") + self.vm1 = VM(self.sut_node, "vm0", "vhost_sample") self.vm1.load_config() vm_params_1 = {} vm_params_1["driver"] = "vhost-user" @@ -343,18 +343,18 @@ class TestDPDKGROLib(TestCase): self.vm1.set_vm_device(**vm_params_1) self.set_vm_cpu_number(self.vm1) try: - self.vm1_dut = self.vm1.start(load_config=False, set_target=False) - if self.vm1_dut is None: + self.vm1_sut = self.vm1.start(load_config=False, set_target=False) + if self.vm1_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print((utils.RED("Failure for %s" % str(e)))) - self.vm1_dut.restore_interfaces() + self.vm1_sut.restore_interfaces() def iperf_result_verify(self, run_info): """ Get the iperf test result """ - fmsg = self.dut.send_expect("cat /root/iperf_client.log", "#") + fmsg = self.sut_node.send_expect("cat 
/root/iperf_client.log", "#") print(fmsg) iperfdata = re.compile("[\d+]*.[\d+]* [M|G|K]bits/sec").findall(fmsg) print(iperfdata) @@ -381,20 +381,20 @@ class TestDPDKGROLib(TestCase): self.launch_testpmd_gro_on() self.start_vm() time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] # Start the Iperf test - self.vm1_dut.send_expect("ifconfig -a", "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig -a", "#", 30) + self.vm1_sut.send_expect( "ifconfig %s %s" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("iperf -s", "", 10) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("iperf -s", "", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect( "ip netns exec ns1 iperf -c %s -i 1 -t 10 -P 1> /root/iperf_client.log &" % (self.virtio_ip1), "", @@ -403,10 +403,10 @@ class TestDPDKGROLib(TestCase): time.sleep(30) tc1_perfdata = self.iperf_result_verify("GRO lib") print(("the GRO lib %s " % (self.output_result))) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) # Turn off DPDK GRO lib and Kernel GRO off self.set_testpmd_gro_off() - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 iperf -c %s -i 1 -t 10 -P 1 > /root/iperf_client.log &" % (self.virtio_ip1), "", @@ -415,10 +415,10 @@ class TestDPDKGROLib(TestCase): time.sleep(30) 
self.iperf_result_verify("Kernel GRO") print(("the Kernel GRO %s " % (self.output_result))) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect( + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect( "echo %s > /root/dpdk_gro_lib_on_iperf_tc1.log" % tc1_perfdata, "#", 10 ) @@ -428,20 +428,20 @@ class TestDPDKGROLib(TestCase): self.launch_testpmd_gro_on(self.heavymode) self.start_vm() time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] # Start the Iperf test - self.vm1_dut.send_expect("ifconfig -a", "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig -a", "#", 30) + self.vm1_sut.send_expect( "ifconfig %s %s" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("iperf -s", "", 10) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("iperf -s", "", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect( "ip netns exec ns1 iperf -c %s -i 1 -t 10 -P 1> /root/iperf_client.log &" % (self.virtio_ip1), "", @@ -450,9 +450,9 @@ class TestDPDKGROLib(TestCase): time.sleep(30) self.iperf_result_verify("GRO lib") print(("the GRO lib %s " % (self.output_result))) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) + 
self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def test_vhost_gro_tcp_heavymode_flush4(self): self.config_kernel_nic_host(0) @@ -460,20 +460,20 @@ class TestDPDKGROLib(TestCase): self.launch_testpmd_gro_on(self.heavymode) self.start_vm() time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] # Start the Iperf test - self.vm1_dut.send_expect("ifconfig -a", "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig -a", "#", 30) + self.vm1_sut.send_expect( "ifconfig %s %s" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("iperf -s", "", 10) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("iperf -s", "", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect( "ip netns exec ns1 iperf -c %s -i 1 -t 10 -P 1> /root/iperf_client.log &" % (self.virtio_ip1), "", @@ -482,9 +482,9 @@ class TestDPDKGROLib(TestCase): time.sleep(30) self.iperf_result_verify("GRO lib") print(("the GRO lib %s " % (self.output_result))) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", 
"#") def test_vhost_gro_tcp_ipv4_with_cbdma_enable(self): self.config_kernel_nic_host(0) @@ -492,21 +492,21 @@ class TestDPDKGROLib(TestCase): self.launch_testpmd_gro_on(self.heavymode, queue=2) self.start_vm(mode=5, queue=2) time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] # Start the Iperf test - self.vm1_dut.send_expect("ifconfig -a", "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig -a", "#", 30) + self.vm1_sut.send_expect( "ifconfig %s %s up" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ethtool -L %s combined 2" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("ethtool -L %s combined 2" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) self.set_testpmd_params() - self.vm1_dut.send_expect("iperf -s", "", 10) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) - out = self.dut.send_expect( + self.vm1_sut.send_expect("iperf -s", "", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) + out = self.sut_node.send_expect( "ip netns exec ns1 iperf -c %s -i 1 -t 60 -m -P 2 > /root/iperf_client.log &" % (self.virtio_ip1), "", @@ -516,10 +516,10 @@ class TestDPDKGROLib(TestCase): print(out) perfdata = self.iperf_result_verify("GRO lib") print(("the GRO lib %s " % (self.output_result))) - # self.dut.send_expect('rm /root/iperf_client.log', '#', 10) + # self.sut_node.send_expect('rm /root/iperf_client.log', '#', 10) self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - exp_perfdata = self.dut.send_expect( + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + exp_perfdata = self.sut_node.send_expect( "cat 
/root/dpdk_gro_lib_on_iperf_tc5.log", "#" ) self.verify( @@ -541,20 +541,20 @@ class TestDPDKGROLib(TestCase): self.launch_testpmd_gro_on(mode=1, queue=2) self.start_vm(mode=1, queue=2) time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] # Start the Iperf test - self.vm1_dut.send_expect("ifconfig -a", "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig -a", "#", 30) + self.vm1_sut.send_expect( "ifconfig %s %s" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("iperf -s", "", 10) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("iperf -s", "", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect( "ip netns exec ns1 iperf -c %s -i 2 -t 60 -f g -m > /root/iperf_client.log &" % (self.virtio_ip1), "", @@ -563,10 +563,10 @@ class TestDPDKGROLib(TestCase): time.sleep(60) perfdata = self.iperf_result_verify("GRO lib") print(("the GRO lib %s " % (self.output_result))) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) # Turn off DPDK GRO lib and Kernel GRO off self.set_testpmd_gro_off() - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 iperf -c %s -i 2 -t 60 -f g -m > /root/iperf_client.log &" % (self.virtio_ip1), "", @@ -575,10 +575,10 @@ class TestDPDKGROLib(TestCase): time.sleep(60) self.iperf_result_verify("Kernel GRO") print(("the Kernel GRO %s 
" % (self.output_result))) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect( + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect( "echo %s > /root/dpdk_gro_lib_on_iperf_tc5.log" % perfdata, "#", 10 ) @@ -586,15 +586,15 @@ class TestDPDKGROLib(TestCase): """ Run after each test case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf %s/vhost-net" % self.base_dir, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net" % self.base_dir, "#") time.sleep(2) - self.dut.send_expect("ip netns del ns1", "# ", 30) - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "# ", 30) + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % (self.peer_pci), "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.pci_drv, self.peer_pci), "# ", 30, @@ -605,12 +605,12 @@ class TestDPDKGROLib(TestCase): """ Run after each test suite. 
""" - for i in self.dut_ports: - port = self.dut.ports_info[i]["port"] + for i in self.sut_ports: + port = self.sut_node.ports_info[i]["port"] port.bind_driver(self.def_driver) self.unprepare_dpdk() - self.dut.send_expect("ip netns del ns1", "# ", 30) - self.dut.send_expect("./usertools/dpdk-devbind.py -u %s" % (self.pci), "# ", 30) - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "# ", 30) + self.sut_node.send_expect("./usertools/dpdk-devbind.py -u %s" % (self.pci), "# ", 30) + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.pci_drv, self.pci), "# ", 30 ) diff --git a/tests/TestSuite_dpdk_gro_lib_cbdma.py b/tests/TestSuite_dpdk_gro_lib_cbdma.py index 50eb58c5..b571d00f 100644 --- a/tests/TestSuite_dpdk_gro_lib_cbdma.py +++ b/tests/TestSuite_dpdk_gro_lib_cbdma.py @@ -21,9 +21,9 @@ from framework.virt_common import VM class TestDPDKGROLibCbdma(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.def_driver = self.dut.ports_info[self.dut_ports[0]][ + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.def_driver = self.sut_node.ports_info[self.sut_ports[0]][ "port" ].get_nic_driver() self.pci = peer.get_pci_info() @@ -37,37 +37,37 @@ class TestDPDKGROLibCbdma(TestCase): and len(self.nic_in_kernel) != 0, "Pls config the direct connection info in vhost_peer_conf.cfg", ) - bind_script_path = self.dut.get_dpdk_bind_script() - self.dut.send_expect( + bind_script_path = self.sut_node.get_dpdk_bind_script() + self.sut_node.send_expect( "%s --bind=%s %s" % (bind_script_path, self.def_driver, self.pci), "# " ) - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] - cores_list = self.dut.get_core_list(config="all", socket=self.ports_socket) + cores_list = 
self.sut_node.get_core_list(config="all", socket=self.ports_socket) self.vhost_list = cores_list[0:3] self.qemu_cpupin = cores_list[3:4][0] # Set the params for VM self.virtio_ip1 = "1.1.1.2" self.virtio_mac1 = "52:54:00:00:00:01" - self.memory_channel = self.dut.get_memory_channels() - if len(set([int(core["socket"]) for core in self.dut.cores])) == 1: + self.memory_channel = self.sut_node.get_memory_channels() + if len(set([int(core["socket"]) for core in self.sut_node.cores])) == 1: self.socket_mem = "1024" else: self.socket_mem = "1024,1024" self.prepare_dpdk() - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.vhost_pmd = PmdOutput(self.dut, self.vhost_user) + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.vhost_pmd = PmdOutput(self.sut_node, self.vhost_user) def set_up(self): """ Run before each test case. 
""" - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False): """ @@ -76,7 +76,7 @@ class TestDPDKGROLibCbdma(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -100,7 +100,7 @@ class TestDPDKGROLibCbdma(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -108,11 +108,11 @@ class TestDPDKGROLibCbdma(TestCase): ) def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -135,32 +135,32 @@ class TestDPDKGROLibCbdma(TestCase): def quit_testpmd(self): self.vhost_user.send_expect("quit", "#", 120) - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) def config_kernel_nic_host(self): - self.dut.send_expect("ip netns del ns1", "#") - self.dut.send_expect("ip netns add ns1", "#") - self.dut.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") - self.dut.send_expect( + 
self.sut_node.send_expect("ip netns del ns1", "#") + self.sut_node.send_expect("ip netns add ns1", "#") + self.sut_node.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") + self.sut_node.send_expect( "ip netns exec ns1 ifconfig %s 1.1.1.8 up" % self.nic_in_kernel, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 ethtool -K %s tso on" % self.nic_in_kernel, "#" ) def prepare_dpdk(self): # Changhe the testpmd checksum fwd code for mac change - self.dut.send_expect( + self.sut_node.send_expect( "cp ./app/test-pmd/csumonly.c ./app/test-pmd/csumonly_backup.c", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "cp ./drivers/net/vhost/rte_eth_vhost.c ./drivers/net/vhost/rte_eth_vhost-backup.c", "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/ether_addr_copy(&peer_eth/i\#if 0' ./app/test-pmd/csumonly.c", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/parse_ethernet(eth_hdr, &info/i\#endif' ./app/test-pmd/csumonly.c", "#", ) @@ -179,30 +179,30 @@ class TestDPDKGROLibCbdma(TestCase): + "RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | " + "RTE_ETH_RX_OFFLOAD_TCP_LRO;" ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i 's/RTE_ETH_TX_OFFLOAD_VLAN_INSERT;/%s/' drivers/net/vhost/rte_eth_vhost.c" % tx_offload, "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i 's/RTE_ETH_RX_OFFLOAD_VLAN_STRIP;/%s/' drivers/net/vhost/rte_eth_vhost.c" % rx_offload, "#", ) - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.build_install_dpdk(self.sut_node.target) def unprepare_dpdk(self): # Recovery the DPDK code to original - self.dut.send_expect( + self.sut_node.send_expect( "cp ./app/test-pmd/csumonly_backup.c ./app/test-pmd/csumonly.c ", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "cp ./drivers/net/vhost/rte_eth_vhost-backup.c ./drivers/net/vhost/rte_eth_vhost.c ", "#", ) - self.dut.send_expect("rm -rf ./app/test-pmd/csumonly_backup.c", "#") - 
self.dut.send_expect("rm -rf ./drivers/net/vhost/rte_eth_vhost-backup.c", "#") - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.send_expect("rm -rf ./app/test-pmd/csumonly_backup.c", "#") + self.sut_node.send_expect("rm -rf ./drivers/net/vhost/rte_eth_vhost-backup.c", "#") + self.sut_node.build_install_dpdk(self.sut_node.target) def set_vm_cpu_number(self, vm_config): # config the vcpu numbers = 1 @@ -214,7 +214,7 @@ class TestDPDKGROLibCbdma(TestCase): vm_config.params[i]["cpu"][0]["cpupin"] = self.qemu_cpupin def start_vm(self, queue=1): - self.vm1 = VM(self.dut, "vm0", "vhost_sample") + self.vm1 = VM(self.sut_node, "vm0", "vhost_sample") self.vm1.load_config() vm_params_1 = {} vm_params_1["driver"] = "vhost-user" @@ -227,18 +227,18 @@ class TestDPDKGROLibCbdma(TestCase): self.vm1.set_vm_device(**vm_params_1) self.set_vm_cpu_number(self.vm1) try: - self.vm1_dut = self.vm1.start(load_config=False, set_target=False) - if self.vm1_dut is None: + self.vm1_sut = self.vm1.start(load_config=False, set_target=False) + if self.vm1_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print((utils.RED("Failure for %s" % str(e)))) - self.vm1_dut.restore_interfaces() + self.vm1_sut.restore_interfaces() def iperf_result_verify(self, run_info): """ Get the iperf test result """ - fmsg = self.dut.send_expect("cat /root/iperf_client.log", "#") + fmsg = self.sut_node.send_expect("cat /root/iperf_client.log", "#") print(fmsg) iperfdata = re.compile("[\d+]*.[\d+]* [M|G|K]bits/sec").findall(fmsg) print(iperfdata) @@ -260,8 +260,8 @@ class TestDPDKGROLibCbdma(TestCase): iperfdata_kb = float(tmp_value) return iperfdata_kb - def check_dut_perf_top_info(self, check_string): - self.dut.send_expect("perf top", "# ") + def check_sut_perf_top_info(self, check_string): + self.sut_node.send_expect("perf top", "# ") def test_vhost_gro_tcp_ipv4_with_cbdma_enable(self): """ @@ -295,17 +295,17 @@ class TestDPDKGROLibCbdma(TestCase): self.set_testpmd_params() 
self.start_vm(queue=2) time.sleep(5) - self.dut.get_session_output(timeout=2) - for port in self.vm1_dut.ports_info: + self.sut_node.get_session_output(timeout=2) + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] - self.vm1_dut.send_expect( + self.vm1_sut.send_expect( "ifconfig %s %s up" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ethtool -L %s combined 2" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("iperf -s", "", 10) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) - out = self.dut.send_expect( + self.vm1_sut.send_expect("ethtool -L %s combined 2" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gro off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("iperf -s", "", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) + out = self.sut_node.send_expect( "ip netns exec ns1 iperf -c %s -i 1 -t 60 -m -P 2 > /root/iperf_client.log &" % (self.virtio_ip1), "", @@ -316,7 +316,7 @@ class TestDPDKGROLibCbdma(TestCase): perfdata = self.iperf_result_verify("GRO lib") print(("the GRO lib %s " % (self.output_result))) self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") exp_perfdata = 10000000 if exp_perfdata: self.verify( @@ -329,15 +329,15 @@ class TestDPDKGROLibCbdma(TestCase): """ Run after each test case. 
""" - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf %s/vhost-net" % self.base_dir, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net" % self.base_dir, "#") time.sleep(2) - self.dut.send_expect("ip netns del ns1", "# ", 30) - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "# ", 30) + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % (self.peer_pci), "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.pci_drv, self.peer_pci), "# ", 30, @@ -349,8 +349,8 @@ class TestDPDKGROLibCbdma(TestCase): Run after each test suite. """ self.unprepare_dpdk() - self.dut.send_expect("ip netns del ns1", "# ", 30) - self.dut.send_expect("./usertools/dpdk-devbind.py -u %s" % (self.pci), "# ", 30) - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "# ", 30) + self.sut_node.send_expect("./usertools/dpdk-devbind.py -u %s" % (self.pci), "# ", 30) + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.pci_drv, self.pci), "# ", 30 ) diff --git a/tests/TestSuite_dpdk_gso_lib.py b/tests/TestSuite_dpdk_gso_lib.py index ee71bcd6..aae06e20 100644 --- a/tests/TestSuite_dpdk_gso_lib.py +++ b/tests/TestSuite_dpdk_gso_lib.py @@ -23,15 +23,15 @@ class TestDPDKGsoLib(TestCase): def set_up_all(self): # This suite will not use the port config in ports.cfg # it will use the port config in vhost_gro.cfg - # And it need two interface reconnet in DUT + # And it need two interface reconnet in SUT # unbind the port which config in ports.cfg - self.dut_ports = self.dut.get_ports() - self.def_driver = self.dut.ports_info[self.dut_ports[0]][ + self.sut_ports = self.sut_node.get_ports() + self.def_driver = 
self.sut_node.ports_info[self.sut_ports[0]][ "port" ].get_nic_driver() - for i in self.dut_ports: - port = self.dut.ports_info[i]["port"] + for i in self.sut_ports: + port = self.sut_node.ports_info[i]["port"] port.bind_driver() # get and bind the port in conf file @@ -46,11 +46,11 @@ class TestDPDKGsoLib(TestCase): and len(self.nic_in_kernel) != 0, "Pls config the direct connection info in vhost_peer_conf.cfg", ) - bind_script_path = self.dut.get_dpdk_bind_script() - self.dut.send_expect( + bind_script_path = self.sut_node.get_dpdk_bind_script() + self.sut_node.send_expect( "%s --bind=%s %s" % (bind_script_path, self.def_driver, self.pci), "# " ) - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] # get the numa info about the pci info which config in peer cfg @@ -62,33 +62,33 @@ class TestDPDKGsoLib(TestCase): # get core list on this socket, 2 cores for testpmd, 1 core for qemu cores_config = "1S/3C/1T" self.verify( - self.dut.number_of_cores >= 3, + self.sut_node.number_of_cores >= 3, "There has not enought cores to test this case %s" % self.suite_name, ) - cores_list = self.dut.get_core_list("1S/3C/1T", socket=self.socket) + cores_list = self.sut_node.get_core_list("1S/3C/1T", socket=self.socket) self.vhost_list = cores_list[0:2] self.qemu_cpupin = cores_list[2:3][0] # Set the params for VM self.virtio_ip1 = "1.1.1.2" self.virtio_mac1 = "52:54:00:00:00:01" - self.memory_channel = self.dut.get_memory_channels() + self.memory_channel = self.sut_node.get_memory_channels() # set diff arg about mem_socket base on socket number - if len(set([int(core["socket"]) for core in self.dut.cores])) == 1: + if len(set([int(core["socket"]) for core in self.sut_node.cores])) == 1: self.socket_mem = "1024" else: self.socket_mem = "1024,1024" self.prepare_dpdk() - self.base_dir = self.dut.base_dir.replace("~", "/root") + self.base_dir = self.sut_node.base_dir.replace("~", "/root") def 
set_up(self): # # Run before each test case. # Clean the execution ENV - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def launch_testpmd_gso_on(self, mode=0): # mode = 0: DPDK GSO for TCP Traffic @@ -96,7 +96,7 @@ class TestDPDKGsoLib(TestCase): # mode = 2: DPDK GSO for Vxlan/GRE Traffic # mode = 3: TSO # mode = others: NO DPDK GSO/TSO - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( cores=self.vhost_list, vdevs=["net_vhost0,iface=%s/vhost-net,queues=1" % self.base_dir], ports=[self.pci], @@ -104,7 +104,7 @@ class TestDPDKGsoLib(TestCase): self.testcmd_start = ( self.path + eal_param + " -- -i --tx-offloads=0x00 --txd=1024 --rxd=1024" ) - self.vhost_user = self.dut.new_session(suite="user") + self.vhost_user = self.sut_node.new_session(suite="user") self.vhost_user.send_expect(self.testcmd_start, "testpmd> ", 120) self.vhost_user.send_expect("set fwd csum", "testpmd> ", 120) self.vhost_user.send_expect("stop", "testpmd> ", 120) @@ -146,69 +146,69 @@ class TestDPDKGsoLib(TestCase): def quit_testpmd(self): # Quit testpmd and close temp ssh session self.vhost_user.send_expect("quit", "#", 120) - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) def config_kernel_nic_host(self): # - self.dut.send_expect("ip netns del ns1", "#") - self.dut.send_expect("ip netns add ns1", "#") - self.dut.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "#") + self.sut_node.send_expect("ip netns add ns1", "#") + self.sut_node.send_expect("ip link set %s 
netns ns1" % self.nic_in_kernel, "#") + self.sut_node.send_expect( "ip netns exec ns1 ifconfig %s 1.1.1.8 up" % self.nic_in_kernel, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 ethtool -K %s gro on" % self.nic_in_kernel, "#" ) def config_kernel_nic_host_for_vxlan(self): - self.dut.send_expect("ip netns del ns1", "#") - self.dut.send_expect("ip netns add ns1", "#") - self.dut.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "#") + self.sut_node.send_expect("ip netns add ns1", "#") + self.sut_node.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") + self.sut_node.send_expect( "ip netns exec ns1 ifconfig %s 188.0.0.1 up" % self.nic_in_kernel, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 ip link add vxlan100 type vxlan id 1000 remote 188.0.0.2 local 188.0.0.1 dstport 4789 dev %s" % self.nic_in_kernel, "#", ) - self.dut.send_expect("ip netns exec ns1 ifconfig vxlan100 1.1.1.1/24 up", "#") + self.sut_node.send_expect("ip netns exec ns1 ifconfig vxlan100 1.1.1.1/24 up", "#") def config_kernel_nic_host_for_gre(self): - self.dut.send_expect("ip netns del ns1", "#") - self.dut.send_expect("ip netns add ns1", "#") - self.dut.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "#") + self.sut_node.send_expect("ip netns add ns1", "#") + self.sut_node.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") + self.sut_node.send_expect( "ip netns exec ns1 ifconfig %s 188.0.0.1 up" % self.nic_in_kernel, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 ip tunnel add gre100 mode gre remote 188.0.0.2 local 188.0.0.1", "#", ) - self.dut.send_expect("ip netns exec ns1 ifconfig gre100 1.1.1.1/24 up", "#") + self.sut_node.send_expect("ip netns exec ns1 ifconfig gre100 1.1.1.1/24 up", "#") def 
prepare_dpdk(self): # Changhe the testpmd checksum fwd code for mac change - self.dut.send_expect( + self.sut_node.send_expect( "cp ./app/test-pmd/csumonly.c ./app/test-pmd/csumonly_backup.c", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/ether_addr_copy(&peer_eth/i\#if 0' ./app/test-pmd/csumonly.c", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/parse_ethernet(eth_hdr, &info/i\#endif' ./app/test-pmd/csumonly.c", "#", ) - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.build_install_dpdk(self.sut_node.target) def unprepare_dpdk(self): # Recovery the DPDK code to original time.sleep(5) - self.dut.send_expect( + self.sut_node.send_expect( "cp ./app/test-pmd/csumonly_backup.c ./app/test-pmd/csumonly.c ", "#" ) - self.dut.send_expect("rm -rf ./app/test-pmd/csumonly_backup.c", "#") - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.send_expect("rm -rf ./app/test-pmd/csumonly_backup.c", "#") + self.sut_node.build_install_dpdk(self.sut_node.target) def set_vm_cpu_number(self, vm_config): # config the vcpu numbers = 1 @@ -225,7 +225,7 @@ class TestDPDKGsoLib(TestCase): mode 0 : VM will send big packet , above MTU mdoe 1: VM only send packet under MTU """ - self.vm1 = VM(self.dut, "vm0", "vhost_sample") + self.vm1 = VM(self.sut_node, "vm0", "vhost_sample") self.vm1.load_config() vm_params_1 = {} vm_params_1["driver"] = "vhost-user" @@ -251,12 +251,12 @@ class TestDPDKGsoLib(TestCase): time.sleep(5) try: - self.vm1_dut = self.vm1.start(load_config=False, set_target=False) - if self.vm1_dut is None: + self.vm1_sut = self.vm1.start(load_config=False, set_target=False) + if self.vm1_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print((utils.RED("Failure for %s" % str(e)))) - self.vm1_dut.restore_interfaces() + self.vm1_sut.restore_interfaces() def iperf_result_verify(self, vm_client): """ @@ -284,29 +284,29 @@ class TestDPDKGsoLib(TestCase): self.launch_testpmd_gso_on(0) 
self.start_vm(0) time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] - self.vm1_dut.send_expect("sh /home/lei/dpdk/Guest_script.sh", "#", 60) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("sh /home/lei/dpdk/Guest_script.sh", "#", 60) + self.vm1_sut.send_expect( "ifconfig %s %s" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gso off" % (self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect("ip netns exec ns1 iperf -s", "", 10) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gso off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("ip netns exec ns1 iperf -s", "", 10) + self.vm1_sut.send_expect( "iperf -c 1.1.1.8 -i 1 -t 10 -P 5 > /root/iperf_client.log &", "", 180 ) time.sleep(30) - self.dut.send_expect("^C", "#", 10) - self.iperf_result_verify(self.vm1_dut) + self.sut_node.send_expect("^C", "#", 10) + self.iperf_result_verify(self.vm1_sut) print(("the GSO lib for TCP traffic %s " % (self.output_result))) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect("ip netns del ns1", "#") + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("ip netns del ns1", "#") self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def test_vhost_gso_dpdk_udp(self): """ @@ -317,117 +317,117 @@ class TestDPDKGsoLib(TestCase): self.launch_testpmd_gso_on(1) self.start_vm(0) time.sleep(5) - 
self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] - self.vm1_dut.send_expect( + self.vm1_sut.send_expect( "ifconfig %s %s" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gso off" % (self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect("ip netns exec ns1 iperf -s -u", "", 10) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gso off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("ip netns exec ns1 iperf -s -u", "", 10) + self.vm1_sut.send_expect( "iperf -c 1.1.1.8 -i 1 -u -t 10 -l 9000 -b 10G -P 5 > /root/iperf_client.log &", "", 60, ) time.sleep(30) - self.dut.send_expect("^C", "#", 10) - self.iperf_result_verify(self.vm1_dut) + self.sut_node.send_expect("^C", "#", 10) + self.iperf_result_verify(self.vm1_sut) print(("the GSO lib for UDP traffic %s " % (self.output_result))) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect("ip netns del ns1", "#") + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("ip netns del ns1", "#") self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def test_vhost_tso_dpdk(self): self.config_kernel_nic_host() self.launch_testpmd_gso_on(3) self.start_vm(0) time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in 
self.vm1_sut.ports_info: self.vm1_intf = port["intf"] # Start the Iperf test - self.vm1_dut.send_expect("ifconfig -a", "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig -a", "#", 30) + self.vm1_sut.send_expect( "ifconfig %s %s" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gso off" % (self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect("ip netns exec ns1 iperf -s", "", 10) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gso off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("ip netns exec ns1 iperf -s", "", 10) + self.vm1_sut.send_expect( "iperf -c 1.1.1.8 -i 1 -t 10 -P 5 > /root/iperf_client.log &", "", 180 ) time.sleep(30) - self.dut.send_expect("^C", "#", 10) - self.iperf_result_verify(self.vm1_dut) + self.sut_node.send_expect("^C", "#", 10) + self.iperf_result_verify(self.vm1_sut) print(("the TSO lib %s " % (self.output_result))) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def test_kernel_gso_dpdk(self): self.config_kernel_nic_host() self.launch_testpmd_gso_on(4) self.start_vm(1) time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] # Start the Iperf test - self.vm1_dut.send_expect("ifconfig -a", "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig -a", "#", 30) + 
self.vm1_sut.send_expect( "ifconfig %s %s" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gso on" % (self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect("ip netns exec ns1 iperf -s", "", 10) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gso on" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("ip netns exec ns1 iperf -s", "", 10) + self.vm1_sut.send_expect( "iperf -c 1.1.1.8 -i 1 -t 10 -P 5 > /root/iperf_client.log &", "", 180 ) time.sleep(30) - self.dut.send_expect("^C", "#", 10) - self.iperf_result_verify(self.vm1_dut) + self.sut_node.send_expect("^C", "#", 10) + self.iperf_result_verify(self.vm1_sut) print(("Kernel GSO %s " % (self.output_result))) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def test_no_gso_dpdk(self): self.config_kernel_nic_host() self.launch_testpmd_gso_on(4) self.start_vm(1) time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] # Start the Iperf test - self.vm1_dut.send_expect("ifconfig -a", "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig -a", "#", 30) + self.vm1_sut.send_expect( "ifconfig %s %s" % (self.vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm1_dut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) - self.vm1_dut.send_expect("ethtool -K %s gso off" % 
(self.vm1_intf), "#", 10) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect("ip netns exec ns1 iperf -s", "", 10) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s up" % self.vm1_intf, "#", 10) + self.vm1_sut.send_expect("ethtool -K %s gso off" % (self.vm1_intf), "#", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("ip netns exec ns1 iperf -s", "", 10) + self.vm1_sut.send_expect( "iperf -c 1.1.1.8 -i 1 -t 10 -P 5 > /root/iperf_client.log &", "", 180 ) time.sleep(30) - self.dut.send_expect("^C", "#", 10) - self.iperf_result_verify(self.vm1_dut) + self.sut_node.send_expect("^C", "#", 10) + self.iperf_result_verify(self.vm1_sut) print(("NO GSO/TSO %s " % (self.output_result))) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def test_vhost_gso_with_vxlan(self): """ @@ -437,32 +437,32 @@ class TestDPDKGsoLib(TestCase): self.launch_testpmd_gso_on(2) self.start_vm(2) time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name and unbind virtio net - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] - self.vm1_dut.send_expect("ifconfig %s 188.0.0.2 up" % self.vm1_intf, "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s 188.0.0.2 up" % self.vm1_intf, "#", 30) + self.vm1_sut.send_expect( "ip link add vxlan100 type vxlan id 1000 remote 188.0.0.1 local 188.0.0.2 dstport 4789 dev %s" % self.vm1_intf, "#", 30, ) - self.vm1_dut.send_expect("ifconfig vxlan100 1.1.1.2/24 up", "#", 30) + self.vm1_sut.send_expect("ifconfig vxlan100 1.1.1.2/24 up", "#", 30) # Start Iperf test - 
self.dut.send_expect("ip netns exec ns1 iperf -s ", "", 10) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.vm1_dut.send_expect( + self.sut_node.send_expect("ip netns exec ns1 iperf -s ", "", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.vm1_sut.send_expect( "iperf -c 1.1.1.1 -i 1 -t 10 -P 5 > /root/iperf_client.log &", "", 60 ) time.sleep(30) - self.dut.send_expect("^C", "#", 10) - self.iperf_result_verify(self.vm1_dut) + self.sut_node.send_expect("^C", "#", 10) + self.iperf_result_verify(self.vm1_sut) print(("the GSO lib for Vxlan traffic %s " % (self.output_result))) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect("ip netns del ns1", "#") + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("ip netns del ns1", "#") self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def test_vhost_gso_with_gre(self): """ @@ -472,38 +472,38 @@ class TestDPDKGsoLib(TestCase): self.launch_testpmd_gso_on(2) self.start_vm(2) time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) # Get the virtio-net device name and unbind virtio net - for port in self.vm1_dut.ports_info: + for port in self.vm1_sut.ports_info: self.vm1_intf = port["intf"] - self.vm1_dut.send_expect("ifconfig %s 188.0.0.2 up" % self.vm1_intf, "#", 30) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig %s 188.0.0.2 up" % self.vm1_intf, "#", 30) + self.vm1_sut.send_expect( "ip tunnel add gre100 mode gre remote 188.0.0.1 local 188.0.0.2", "#", 30 ) - self.vm1_dut.send_expect("ifconfig gre100 1.1.1.2/24 up", "#", 30) - self.dut.send_expect("ip netns exec ns1 iperf -s", "", 10) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.vm1_dut.send_expect( + self.vm1_sut.send_expect("ifconfig gre100 1.1.1.2/24 up", "#", 
30) + self.sut_node.send_expect("ip netns exec ns1 iperf -s", "", 10) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.vm1_sut.send_expect( "iperf -c 1.1.1.1 -i 1 -t 10 -P 5 > /root/iperf_client.log &", "", 60 ) time.sleep(30) - self.dut.send_expect("^C", "#", 10) - self.iperf_result_verify(self.vm1_dut) - self.vm1_dut.send_expect("rm /root/iperf_client.log", "#", 10) - self.dut.send_expect("ip netns del ns1", "#") + self.sut_node.send_expect("^C", "#", 10) + self.iperf_result_verify(self.vm1_sut) + self.vm1_sut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("ip netns del ns1", "#") self.quit_testpmd() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def tear_down(self): """ Run after each test case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect( + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % (self.peer_pci), "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.pci_drv, self.peer_pci), "# ", 30, @@ -514,12 +514,12 @@ class TestDPDKGsoLib(TestCase): """ Run after each test suite. 
""" - for i in self.dut_ports: - port = self.dut.ports_info[i]["port"] + for i in self.sut_ports: + port = self.sut_node.ports_info[i]["port"] port.bind_driver(self.def_driver) self.unprepare_dpdk() - self.dut.send_expect("ip netns del ns1", "#", 30) - self.dut.send_expect("./usertools/dpdk-devbind.py -u %s" % (self.pci), "# ", 30) - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "#", 30) + self.sut_node.send_expect("./usertools/dpdk-devbind.py -u %s" % (self.pci), "# ", 30) + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.pci_drv, self.pci), "# ", 30 ) diff --git a/tests/TestSuite_dpdk_hugetlbfs_mount_size.py b/tests/TestSuite_dpdk_hugetlbfs_mount_size.py index 6ab2bdbd..f7e85661 100644 --- a/tests/TestSuite_dpdk_hugetlbfs_mount_size.py +++ b/tests/TestSuite_dpdk_hugetlbfs_mount_size.py @@ -48,18 +48,18 @@ class DpdkHugetlbfsMountSize(TestCase): Run at the start of each test suite. """ self.packet_num = 100 - self.mem_channels = self.dut.get_memory_channels() - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/6C/1T", socket=self.ports_socket) + self.mem_channels = self.sut_node.get_memory_channels() + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/6C/1T", socket=self.ports_socket) self.verify(len(cores) >= 6, "Insufficient cores for speed testing") self.core_list1 = ",".join(str(i) for i in cores[0:2]) self.core_list2 = ",".join(str(i) for i in cores[2:4]) self.core_list3 = ",".join(str(i) for i in cores[4:6]) - self.pci_info_0 = self.dut.ports_info[0]["pci"] - self.pci_info_1 = self.dut.ports_info[1]["pci"] - self.numa_id = self.dut.get_numa_id(self.dut_ports[0]) 
+ self.pci_info_0 = self.sut_node.ports_info[0]["pci"] + self.pci_info_1 = self.sut_node.ports_info[1]["pci"] + self.numa_id = self.sut_node.get_numa_id(self.sut_ports[0]) self.create_folder([MNT_PATH[0], MNT_PATH[1], MNT_PATH[2]]) if self.numa_id == 0: self.socket_mem = "1024,0" @@ -68,7 +68,7 @@ class DpdkHugetlbfsMountSize(TestCase): self.socket_mem = "0,1024" self.socket_mem2 = "0,2048" self.umount_huge([DEFAULT_MNT]) - self.app_path = self.dut.apps_name["test-pmd"] + self.app_path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ @@ -77,18 +77,18 @@ class DpdkHugetlbfsMountSize(TestCase): pass def close_session(self): - self.dut.close_session(self.session_first) - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_first) + self.sut_node.close_session(self.session_secondary) def send_pkg(self, port_id): - tx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[port_id]) + tx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[port_id]) ) - mac = self.dut.get_mac_address(self.dut_ports[port_id]) + mac = self.sut_node.get_mac_address(self.sut_ports[port_id]) cmd = 'sendp([Ether(dst="%s")/IP()/("X"*64)], iface="%s", count=%d)' excute_cmd = cmd % (mac, tx_interface, self.packet_num) - self.tester.scapy_append(excute_cmd) - self.tester.scapy_execute() + self.tg_node.scapy_append(excute_cmd) + self.tg_node.scapy_execute() def verify_result(self, session): out = session.send_expect("show port stats all", "testpmd> ", 120) @@ -103,21 +103,21 @@ class DpdkHugetlbfsMountSize(TestCase): def create_folder(self, huges=[]): for huge in huges: cmd = "mkdir -p %s" % huge - self.dut.send_expect(cmd, "#", 15) + self.sut_node.send_expect(cmd, "#", 15) def del_folder(self, huges=[]): for huge in huges: cmd = "rm -rf %s" % huge - self.dut.send_expect(cmd, "#", 15) + self.sut_node.send_expect(cmd, "#", 15) def umount_huge(self, huges=[]): for huge in huges: cmd = "umount 
%s" % huge - self.dut.send_expect(cmd, "#", 15) + self.sut_node.send_expect(cmd, "#", 15) def test_default_hugepage_size(self): # Bind one nic port to igb_uio driver, launch testpmd - self.dut.send_expect("mount -t hugetlbfs hugetlbfs %s" % MNT_PATH[0], "#", 15) + self.sut_node.send_expect("mount -t hugetlbfs hugetlbfs %s" % MNT_PATH[0], "#", 15) self.logger.info("test default hugepage size start testpmd without numa") ttd = "%s -l %s -n %d --huge-dir %s --file-prefix=%s -a %s -- -i" launch_ttd = ttd % ( @@ -128,13 +128,13 @@ class DpdkHugetlbfsMountSize(TestCase): vhost_name[0], self.pci_info_0, ) - self.dut.send_expect(launch_ttd, "testpmd> ", 120) - self.dut.send_expect("set promisc all off", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) - self.dut.send_expect("clear port stats all", "testpmd> ", 120) + self.sut_node.send_expect(launch_ttd, "testpmd> ", 120) + self.sut_node.send_expect("set promisc all off", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) self.send_pkg(0) - self.verify_result(self.dut) - self.dut.send_expect("quit", "#", 15) + self.verify_result(self.sut_node) + self.sut_node.send_expect("quit", "#", 15) # resart testpmd with numa support self.logger.info("test default hugepage size start testpmd with numa") @@ -149,23 +149,23 @@ class DpdkHugetlbfsMountSize(TestCase): vhost_name[0], self.pci_info_0, ) - self.dut.send_expect(launch_ttd_secondary, "testpmd> ", 120) - self.dut.send_expect("set promisc all off", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) - self.dut.send_expect("clear port stats all", "testpmd> ", 120) + self.sut_node.send_expect(launch_ttd_secondary, "testpmd> ", 120) + self.sut_node.send_expect("set promisc all off", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) self.send_pkg(0) - 
self.verify_result(self.dut) - self.dut.send_expect("quit", "#", 15) + self.verify_result(self.sut_node) + self.sut_node.send_expect("quit", "#", 15) self.umount_huge([MNT_PATH[0]]) def test_mount_size_exactly_match_hugepage_size_two_mount_points(self): # Bind two nic ports to igb_uio driver, launch testpmd with numactl - self.session_first = self.dut.new_session(suite="session_first") - self.session_secondary = self.dut.new_session(suite="session_secondary") - self.dut.send_expect( + self.session_first = self.sut_node.new_session(suite="session_first") + self.session_secondary = self.sut_node.new_session(suite="session_secondary") + self.sut_node.send_expect( "mount -t hugetlbfs -o size=4G hugetlbfs %s" % MNT_PATH[0], "#", 15 ) - self.dut.send_expect( + self.sut_node.send_expect( "mount -t hugetlbfs -o size=4G hugetlbfs %s" % MNT_PATH[1], "#", 15 ) @@ -221,7 +221,7 @@ class DpdkHugetlbfsMountSize(TestCase): def test_mount_size_greater_than_hugepage_size_single_mount_point(self): # Bind one nic port to igb_uio driver - self.dut.send_expect( + self.sut_node.send_expect( "mount -t hugetlbfs -o size=9G hugetlbfs %s" % MNT_PATH[0], "#", 15 ) ttd = "%s -l %s -n %d --legacy-mem --huge-dir %s --file-prefix=%s -a %s -- -i" @@ -233,26 +233,26 @@ class DpdkHugetlbfsMountSize(TestCase): vhost_name[0], self.pci_info_0, ) - self.dut.send_expect(launch_ttd, "testpmd> ", 120) - self.dut.send_expect("set promisc all off", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) - self.dut.send_expect("clear port stats all", "testpmd> ", 120) + self.sut_node.send_expect(launch_ttd, "testpmd> ", 120) + self.sut_node.send_expect("set promisc all off", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) self.send_pkg(0) - self.verify_result(self.dut) - self.dut.send_expect("quit", "#", 15) + self.verify_result(self.sut_node) + self.sut_node.send_expect("quit", "#", 15) 
self.umount_huge([MNT_PATH[0]]) def test_mount_size_greater_than_hugepage_size_multiple_mount_points(self): # Bind one nic port to igb_uio driver, launch testpmd - self.session_first = self.dut.new_session(suite="session_first") - self.session_secondary = self.dut.new_session(suite="session_secondary") - self.dut.send_expect( + self.session_first = self.sut_node.new_session(suite="session_first") + self.session_secondary = self.sut_node.new_session(suite="session_secondary") + self.sut_node.send_expect( "mount -t hugetlbfs -o size=4G hugetlbfs %s" % MNT_PATH[0], "#", 15 ) - self.dut.send_expect( + self.sut_node.send_expect( "mount -t hugetlbfs -o size=4G hugetlbfs %s" % MNT_PATH[1], "#", 15 ) - self.dut.send_expect( + self.sut_node.send_expect( "mount -t hugetlbfs -o size=1G hugetlbfs %s" % MNT_PATH[2], "#", 15 ) # launch first testpmd @@ -317,18 +317,18 @@ class DpdkHugetlbfsMountSize(TestCase): self.numa_id, ) expect_str = "Not enough memory available on socket" - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) try: - self.dut.send_expect(launch_ttd_third, expect_str, 120) + self.sut_node.send_expect(launch_ttd_third, expect_str, 120) except Exception as e: print(e) - self.dut.send_expect("quit", "#", 15) + self.sut_node.send_expect("quit", "#", 15) self.session_first.send_expect("quit", "#", 15) self.session_secondary.send_expect("quit", "#", 15) self.umount_huge([MNT_PATH[0], MNT_PATH[1], MNT_PATH[2]]) self.verify(0, "the expect str: %s ,not in output info" % expect_str) self.logger.info("the third testpmd start failed as expect : %s" % expect_str) - result = self.dut.get_session_output(timeout=2) + result = self.sut_node.get_session_output(timeout=2) print(result) # start send packet and verify the session can receive the packet. 
@@ -343,9 +343,9 @@ class DpdkHugetlbfsMountSize(TestCase): def test_run_dpdk_app_limited_hugepages_controlled_by_cgroup(self): # Bind one nic port to igb_uio driver, launch testpmd in limited hugepages - self.dut.send_expect("mount -t hugetlbfs nodev %s" % MNT_PATH[0], "#", 15) - self.dut.send_expect("cgcreate -g hugetlb:/test-subgroup", "# ", 15) - self.dut.send_expect( + self.sut_node.send_expect("mount -t hugetlbfs nodev %s" % MNT_PATH[0], "#", 15) + self.sut_node.send_expect("cgcreate -g hugetlb:/test-subgroup", "# ", 15) + self.sut_node.send_expect( "cgset -r hugetlb.1GB.limit_in_bytes=2147483648 test-subgroup", "#", 15 ) ttd = "cgexec -g hugetlb:test-subgroup numactl -m %d %s -l %s -n %d -a %s -- -i --socket-num=%d --no-numa" @@ -357,13 +357,13 @@ class DpdkHugetlbfsMountSize(TestCase): self.pci_info_0, self.numa_id, ) - self.dut.send_expect(launch_ttd, "testpmd> ", 120) - self.dut.send_expect("set promisc all off", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) - self.dut.send_expect("clear port stats all", "testpmd> ", 120) + self.sut_node.send_expect(launch_ttd, "testpmd> ", 120) + self.sut_node.send_expect("set promisc all off", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) self.send_pkg(0) - self.verify_result(self.dut) - self.dut.send_expect("quit", "#", 15) + self.verify_result(self.sut_node) + self.sut_node.send_expect("quit", "#", 15) self.umount_huge([MNT_PATH[0]]) def tear_down(self): @@ -372,7 +372,7 @@ class DpdkHugetlbfsMountSize(TestCase): """ # If case fails, the mount should be cancelled to avoid affecting next cases self.umount_huge([MNT_PATH[0], MNT_PATH[1], MNT_PATH[2]]) - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) def tear_down_all(self): @@ -381,4 +381,4 @@ class DpdkHugetlbfsMountSize(TestCase): """ self.umount_huge([MNT_PATH[0], MNT_PATH[1], MNT_PATH[2]]) self.del_folder([MNT_PATH[0], MNT_PATH[1], 
MNT_PATH[2]]) - self.dut.send_expect("mount -t hugetlbfs nodev %s" % DEFAULT_MNT, "#", 15) + self.sut_node.send_expect("mount -t hugetlbfs nodev %s" % DEFAULT_MNT, "#", 15) diff --git a/tests/TestSuite_dual_vlan.py b/tests/TestSuite_dual_vlan.py index 99f47a82..ba8657f6 100644 --- a/tests/TestSuite_dual_vlan.py +++ b/tests/TestSuite_dual_vlan.py @@ -84,22 +84,22 @@ class TestDualVlan(TestCase): Vlan Prerequisites """ - global dutRxPortId - global dutTxPortId + global sutRxPortId + global sutTxPortId # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) - cores = self.dut.get_core_list("1S/2C/2T") + cores = self.sut_node.get_core_list("1S/2C/2T") coreMask = utils.create_mask(cores) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] portMask = utils.create_mask(valports[:2]) - dutRxPortId = valports[0] - dutTxPortId = valports[1] + sutRxPortId = valports[0] + sutTxPortId = valports[1] - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.pmdout.start_testpmd( "Default", "--portmask=%s" % portMask, socket=self.ports_socket ) @@ -113,24 +113,24 @@ class TestDualVlan(TestCase): "I40E_10G-10G_BASE_T_BC", "I40E_10G-10G_BASE_T_X722", ]: - self.dut.send_expect("vlan set filter on all", "testpmd> ") - self.dut.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("vlan set filter on all", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") - out = self.dut.send_expect("set fwd mac", "testpmd> ") + out = self.sut_node.send_expect("set fwd mac", 
"testpmd> ") self.verify("Set mac packet forwarding mode" in out, "set fwd mac error") - out = self.dut.send_expect("start", "testpmd> ", 120) + out = self.sut_node.send_expect("start", "testpmd> ", 120) def start_tcpdump(self, rxItf): - self.tester.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") + self.tg_node.send_expect( "tcpdump -i %s -w ./getPackageByTcpdump.cap 2> /dev/null& " % rxItf, "#" ) def get_tcpdump_package(self): - self.tester.send_expect("killall tcpdump", "#") - self.tester.send_expect(" ", "#") - return self.tester.send_expect( + self.tg_node.send_expect("killall tcpdump", "#") + self.tg_node.send_expect(" ", "#") + return self.tg_node.send_expect( "tcpdump -nn -e -v -r ./getPackageByTcpdump.cap", "#" ) @@ -138,12 +138,12 @@ class TestDualVlan(TestCase): """ Send packet to portid """ - txPort = self.tester.get_local_port(dutRxPortId) - rxPort = self.tester.get_local_port(dutTxPortId) + txPort = self.tg_node.get_local_port(sutRxPortId) + rxPort = self.tg_node.get_local_port(sutTxPortId) - txItf = self.tester.get_interface(txPort) - rxItf = self.tester.get_interface(rxPort) - mac = self.dut.get_mac_address(dutRxPortId) + txItf = self.tg_node.get_interface(txPort) + rxItf = self.tg_node.get_interface(rxPort) + mac = self.sut_node.get_mac_address(sutRxPortId) self.start_tcpdump(rxItf) vlanString = 'sendp([Ether(dst="%s")/' % mac @@ -151,10 +151,10 @@ class TestDualVlan(TestCase): vlanString += "Dot1Q(id=0x8100,vlan=%s)/" % vid[i] vlanString += 'IP(len=46)],iface="%s", count=4)' % txItf - self.tester.scapy_append(vlanString) + self.tg_node.scapy_append(vlanString) # check link status before send pkg - self.pmdout.wait_link_status_up(self.dut_ports[0]) - self.tester.scapy_execute() + self.pmdout.wait_link_status_up(self.sut_ports[0]) + self.tg_node.scapy_execute() def mode_config(self, **modeName): """ @@ -174,28 +174,28 @@ class TestDualVlan(TestCase): # 
Intel® Ethernet 700 Series NIC vlan filter can't close, if want close need remove rx_vlan if mode == "filter": if modeName[mode] == "off": - self.dut.send_expect( - "rx_vlan add %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add %s %s" % (outvlan, sutRxPortId), "testpmd> " ) continue else: - self.dut.send_expect( - "rx_vlan rm %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan rm %s %s" % (outvlan, sutRxPortId), "testpmd> " ) continue if mode == "stripq": - self.dut.send_expect( - "vlan set %s %s %s,0" % (mode, modeName[mode], dutRxPortId), + self.sut_node.send_expect( + "vlan set %s %s %s,0" % (mode, modeName[mode], sutRxPortId), "testpmd> ", ) else: - self.dut.send_expect( - "vlan set %s %s %s" % (mode, modeName[mode], dutRxPortId), + self.sut_node.send_expect( + "vlan set %s %s %s" % (mode, modeName[mode], sutRxPortId), "testpmd> ", ) - out = self.dut.send_expect("show port info %s" % dutRxPortId, "testpmd> ") + out = self.sut_node.send_expect("show port info %s" % sutRxPortId, "testpmd> ") for mode in modeName: if self.nic in [ "I40E_10G-SFP_XL710", @@ -209,13 +209,13 @@ class TestDualVlan(TestCase): # Intel® Ethernet 700 Series NIC vlan filter can't close, if want close need remove rx_vlan if mode == "filter": if modeName[mode] == "off": - self.dut.send_expect( - "rx_vlan add %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add %s %s" % (outvlan, sutRxPortId), "testpmd> " ) continue else: - self.dut.send_expect( - "rx_vlan rm %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan rm %s %s" % (outvlan, sutRxPortId), "testpmd> " ) continue @@ -247,13 +247,13 @@ class TestDualVlan(TestCase): self.mode_config(strip=temp[0], filter=temp[1], extend=temp[2]) if (caseDef & txCase) != 0: - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect( - "tx_vlan set %s %s" % 
(dutTxPortId, txvlan), "testpmd> " + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect( + "tx_vlan set %s %s" % (sutTxPortId, txvlan), "testpmd> " ) - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") configMode = "Strip %s, filter %s 0x1, extend %s, insert %s" % ( temp[0], @@ -263,49 +263,49 @@ class TestDualVlan(TestCase): ) if (caseDef & filterCase) != 0: - self.dut.send_expect( - "rx_vlan add %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add %s %s" % (outvlan, sutRxPortId), "testpmd> " ) self.vlan_send_packet(outvlan, invlan) self.check_result(vlanCase[caseIndex][0], configMode + " result Error") - self.dut.send_expect( - "rx_vlan rm %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan rm %s %s" % (outvlan, sutRxPortId), "testpmd> " ) - self.dut.send_expect( - "rx_vlan add %s %s" % (invlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add %s %s" % (invlan, sutRxPortId), "testpmd> " ) self.vlan_send_packet(outvlan, invlan) self.check_result(vlanCase[caseIndex][1], configMode + " result Error") - self.dut.send_expect( - "rx_vlan rm %s %s" % (invlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan rm %s %s" % (invlan, sutRxPortId), "testpmd> " ) if (caseDef & txCase) != 0: - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan reset %s" % dutTxPortId, "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan reset %s" % sutTxPortId, "testpmd> ") + 
self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") else: - self.dut.send_expect( - "rx_vlan add %s %s" % (invlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add %s %s" % (invlan, sutRxPortId), "testpmd> " ) - self.dut.send_expect( - "rx_vlan add %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add %s %s" % (outvlan, sutRxPortId), "testpmd> " ) self.vlan_send_packet(outvlan, invlan) self.check_result(vlanCase[caseIndex], configMode + " result Error") if (caseDef & txCase) != 0: - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan reset %s" % dutTxPortId, "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect( - "rx_vlan rm %s %s" % (invlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan reset %s" % sutTxPortId, "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect( + "rx_vlan rm %s %s" % (invlan, sutRxPortId), "testpmd> " ) - self.dut.send_expect( - "rx_vlan rm %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan rm %s %s" % (outvlan, sutRxPortId), "testpmd> " ) def check_result(self, resultKey, errorString): @@ -363,14 +363,14 @@ class TestDualVlan(TestCase): out = self.get_tcpdump_package() self.verify("vlan %s" % outvlan in out, "Vlan filter disable error: " + out) else: - self.dut.send_expect( - "rx_vlan add %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add %s %s" % (outvlan, sutRxPortId), "testpmd> " ) self.vlan_send_packet(outvlan) out = self.get_tcpdump_package() self.verify("vlan %s" % outvlan in out, "Vlan 
filter disable error: " + out) - self.dut.send_expect( - "rx_vlan rm %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan rm %s %s" % (outvlan, sutRxPortId), "testpmd> " ) def test_vlan_filter_table(self): @@ -382,14 +382,14 @@ class TestDualVlan(TestCase): self.mode_config(strip="off") self.mode_config(extend="off") - self.dut.send_expect("rx_vlan add %s %s" % (outvlan, dutRxPortId), "testpmd> ") + self.sut_node.send_expect("rx_vlan add %s %s" % (outvlan, sutRxPortId), "testpmd> ") self.vlan_send_packet(outvlan) out = self.get_tcpdump_package() self.verify( "vlan %s" % outvlan in out, "vlan filter table enable error: " + out ) - self.dut.send_expect("rx_vlan rm %s %s" % (outvlan, dutRxPortId), "testpmd> ") + self.sut_node.send_expect("rx_vlan rm %s %s" % (outvlan, sutRxPortId), "testpmd> ") self.vlan_send_packet(outvlan) out = self.get_tcpdump_package() self.verify( @@ -416,8 +416,8 @@ class TestDualVlan(TestCase): "I40E_10G-10G_BASE_T_BC", "I40E_10G-10G_BASE_T_X722", ]: - self.dut.send_expect( - "rx_vlan add %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add %s %s" % (outvlan, sutRxPortId), "testpmd> " ) self.vlan_send_packet(outvlan) out = self.get_tcpdump_package() @@ -438,8 +438,8 @@ class TestDualVlan(TestCase): "I40E_10G-10G_BASE_T_BC", "I40E_10G-10G_BASE_T_X722", ]: - self.dut.send_expect( - "rx_vlan rm %s %s" % (outvlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan rm %s %s" % (outvlan, sutRxPortId), "testpmd> " ) def test_vlan_stripq_config(self): @@ -478,23 +478,23 @@ class TestDualVlan(TestCase): # IGB_1G-82574L need to set CTRL.VME for vlan insert if self.nic == "IGB_1G-82574L": - self.dut.send_expect("vlan set strip on %s" % dutTxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set strip on %s" % sutTxPortId, "testpmd> ") - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan set %s 
%s" % (dutTxPortId, txvlan), "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan set %s %s" % (sutTxPortId, txvlan), "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.vlan_send_packet() out = self.get_tcpdump_package() self.verify("vlan %s" % txvlan in out, "vlan insert enable error: " + out) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan reset %s" % dutTxPortId, "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan reset %s" % sutTxPortId, "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.vlan_send_packet() out = self.get_tcpdump_package() @@ -520,14 +520,14 @@ class TestDualVlan(TestCase): self.mode_config(filter="on", strip="on", extend="on") # nic only support inner model, except Intel® Ethernet 700 Series nic - self.dut.send_expect("vlan set inner tpid 1234 %s" % dutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set inner tpid 1234 %s" % sutRxPortId, "testpmd> ") self.vlan_send_packet(outvlan, invlan) out = self.get_tcpdump_package() self.verify("vlan %s" % outvlan in out, "vlan tpid disable error: " + out) self.verify("vlan %s" % invlan in out, "vlan tpid disable error: " + out) - self.dut.send_expect("vlan set inner tpid 0x8100 %s" % dutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set inner tpid 0x8100 %s" % sutRxPortId, "testpmd> ") self.vlan_send_packet(outvlan, invlan) out = self.get_tcpdump_package() @@ 
-566,5 +566,5 @@ class TestDualVlan(TestCase): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() pass diff --git a/tests/TestSuite_dynamic_config.py b/tests/TestSuite_dynamic_config.py index 2812477a..16f35d50 100644 --- a/tests/TestSuite_dynamic_config.py +++ b/tests/TestSuite_dynamic_config.py @@ -38,26 +38,26 @@ class TestDynamicConfig(TestCase): """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") # Prepare cores and ports - self.portMask = utils.create_mask(self.dut_ports[:2]) - self.path = self.dut.apps_name["test-pmd"] + self.portMask = utils.create_mask(self.sut_ports[:2]) + self.path = self.sut_node.apps_name["test-pmd"] # launch app - self.eal_para = self.dut.create_eal_parameters(cores="1S/2C/2T") - self.dut.send_expect( + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/2C/2T") + self.sut_node.send_expect( r"%s %s -- -i --rxpt=0 --rxht=0 --rxwt=0 --txpt=39 --txht=0 --txwt=0 --portmask=%s" % (self.path, self.eal_para, self.portMask), "testpmd>", 120, ) # get dest address from self.target port - out = self.dut.send_expect("show port info %d" % self.dut_ports[0], "testpmd> ") + out = self.sut_node.send_expect("show port info %d" % self.sut_ports[0], "testpmd> ") - self.dest = self.dut.get_mac_address(self.dut_ports[0]) + self.dest = self.sut_node.get_mac_address(self.sut_ports[0]) mac_scanner = r"MAC address: (([\dA-F]{2}:){5}[\dA-F]{2})" ret = utils.regexp(out, mac_scanner) @@ -68,51 +68,51 @@ class TestDynamicConfig(TestCase): "Promiscuous mode: enabled" in out, "wrong default promiscuous value" ) - self.dut.kill_all() + self.sut_node.kill_all() def dynamic_config_send_packet(self, portid, destMac="00:11:22:33:44:55"): """ Send 1 packet to portid 
""" - self.pmd_output = PmdOutput(self.dut) + self.pmd_output = PmdOutput(self.sut_node) res = self.pmd_output.wait_link_status_up("all", 30) self.verify(res is True, "there have port link is down") - itf = self.tester.get_interface(self.tester.get_local_port(portid)) + itf = self.tg_node.get_interface(self.tg_node.get_local_port(portid)) - self.tester.scapy_foreground() - self.tester.scapy_append( + self.tg_node.scapy_foreground() + self.tg_node.scapy_append( 'sendp([Ether(dst="%s", src="52:00:00:00:00:00")/Raw(load="X"*26)], iface="%s", count=4)' % (destMac, itf) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def set_up(self): """ Run before each test case. """ - self.eal_para = self.dut.create_eal_parameters("1S/2C/2T") - self.dut.send_expect( + self.eal_para = self.sut_node.create_eal_parameters("1S/2C/2T") + self.sut_node.send_expect( r"%s %s -- -i --rxpt=0 --rxht=0 --rxwt=0 --txpt=39 --txht=0 --txwt=0 --portmask=%s" % (self.path, self.eal_para, self.portMask), "testpmd>", 120, ) time.sleep(5) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) def test_dynamic_config_default_mode(self): """ Dynamic config default mode test """ - portid = self.dut_ports[0] + portid = self.sut_ports[0] # get the current rx statistic - out = self.dut.send_expect("clear port stats all", "testpmd> ") - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + out = self.sut_node.send_expect("clear port stats all", "testpmd> ") + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") @@ -120,8 +120,8 @@ class TestDynamicConfig(TestCase): self.dynamic_config_send_packet(portid) pre_rxpkt = cur_rxpkt - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = 
utils.regexp(out, "TX-packets: ([0-9]+)") @@ -134,8 +134,8 @@ class TestDynamicConfig(TestCase): self.dynamic_config_send_packet(portid, self.dest) pre_rxpkt = cur_rxpkt - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") @@ -149,25 +149,25 @@ class TestDynamicConfig(TestCase): Dynamic config disable promiscuous test """ - portid = self.dut_ports[0] + portid = self.sut_ports[0] - self.dut.send_expect("set promisc all off", "testpmd> ") - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + self.sut_node.send_expect("set promisc all off", "testpmd> ") + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") self.dynamic_config_send_packet(portid) pre_rxpkt = cur_rxpkt - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") self.verify(int(cur_rxpkt) == int(pre_rxpkt), "1st packet increment error") self.dynamic_config_send_packet(portid, self.dest) pre_rxpkt = cur_rxpkt - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") self.verify(int(cur_rxpkt) == int(pre_rxpkt) + 4, "2nd packet increment error") @@ -179,23 +179,23 @@ class TestDynamicConfig(TestCase): and dst mac not port mac, dpdk will not received packet. 
""" - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect("set fwd io", "testpmd> ") - self.dut.send_expect("clear port stats all", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("set fwd io", "testpmd> ") + self.sut_node.send_expect("clear port stats all", "testpmd> ") - self.dynamic_config_send_packet(self.dut_ports[0], "ff:ff:ff:ff:ff:ff") - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + self.dynamic_config_send_packet(self.sut_ports[0], "ff:ff:ff:ff:ff:ff") + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") self.verify(int(cur_rxpkt) == 4, "not received broadcast packet") - self.dut.send_expect("clear port stats all", "testpmd> ") + self.sut_node.send_expect("clear port stats all", "testpmd> ") - self.dynamic_config_send_packet(self.dut_ports[0]) - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + self.dynamic_config_send_packet(self.sut_ports[0]) + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") @@ -210,14 +210,14 @@ class TestDynamicConfig(TestCase): disable multicast, dpdk not received this packet """ - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect("set fwd io", "testpmd> ") - self.dut.send_expect("clear port stats all", "testpmd> ") - self.dut.send_expect("set allmulti all on", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("set fwd io", "testpmd> ") + self.sut_node.send_expect("clear port stats all", "testpmd> ") + self.sut_node.send_expect("set allmulti all on", "testpmd> ") - self.dynamic_config_send_packet(self.dut_ports[0], "01:00:00:33:00:01") - out = self.dut.send_expect( - "show port stats %d" % 
self.dut_ports[1], "testpmd> " + self.dynamic_config_send_packet(self.sut_ports[0], "01:00:00:33:00:01") + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") @@ -225,12 +225,12 @@ class TestDynamicConfig(TestCase): int(cur_rxpkt) == 4, "enable allmulti switch, not received allmulti packet" ) - self.dut.send_expect("clear port stats all", "testpmd> ") - self.dut.send_expect("set allmulti all off", "testpmd> ") + self.sut_node.send_expect("clear port stats all", "testpmd> ") + self.sut_node.send_expect("set allmulti all off", "testpmd> ") - self.dynamic_config_send_packet(self.dut_ports[0], "01:00:00:33:00:01") - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + self.dynamic_config_send_packet(self.sut_ports[0], "01:00:00:33:00:01") + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") @@ -243,13 +243,13 @@ class TestDynamicConfig(TestCase): Dynamic config enable promiscuous test """ - portid = self.dut_ports[0] + portid = self.sut_ports[0] - self.dut.send_expect("set promisc %d on" % portid, "testpmd> ") + self.sut_node.send_expect("set promisc %d on" % portid, "testpmd> ") # get the current rx statistic - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") @@ -257,8 +257,8 @@ class TestDynamicConfig(TestCase): self.dynamic_config_send_packet(portid) pre_rxpkt = cur_rxpkt - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") @@ -269,21 +269,21 @@ class TestDynamicConfig(TestCase): 
self.dynamic_config_send_packet(portid, self.dest) pre_rxpkt = cur_rxpkt - out = self.dut.send_expect( - "show port stats %d" % self.dut_ports[1], "testpmd> " + out = self.sut_node.send_expect( + "show port stats %d" % self.sut_ports[1], "testpmd> " ) cur_rxpkt = utils.regexp(out, "TX-packets: ([0-9]+)") # check the packet increment self.verify(int(cur_rxpkt) == int(pre_rxpkt) + 4, "2nd packet increment error") - # self.dut.send_expect("quit", "# ", 30) + # self.sut_node.send_expect("quit", "# ", 30) def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_dynamic_flowtype.py b/tests/TestSuite_dynamic_flowtype.py index 0abc6068..20918432 100644 --- a/tests/TestSuite_dynamic_flowtype.py +++ b/tests/TestSuite_dynamic_flowtype.py @@ -5,7 +5,7 @@ import re import time -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils from framework.pmd_output import PmdOutput from framework.test_case import TestCase @@ -19,15 +19,15 @@ class TestDynamicFlowtype(TestCase): self.is_eth_series_nic(700), "dynamic flow type mapping can not support %s nic" % self.nic, ) - ports = self.dut.get_ports() + ports = self.sut_node.get_ports() self.verify(len(ports) >= 1, "Insufficient ports for testing") - valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] - self.dut_port = valports[0] - tester_port = self.tester.get_local_port(self.dut_port) - self.tester_intf = self.tester.get_interface(tester_port) + valports = [_ for _ in ports if self.tg_node.get_local_port(_) != -1] + self.sut_port = valports[0] + tg_port = self.tg_node.get_local_port(self.sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) profile_file = "dep/gtp.pkgo" profile_dst = "/tmp/" - self.dut.session.copy_file_to(profile_file, profile_dst) + self.sut_node.session.copy_file_to(profile_file, profile_dst) PF_Q_strip = 
"RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF" self.PF_QUEUE = self.search_queue_number(PF_Q_strip) @@ -35,8 +35,8 @@ class TestDynamicFlowtype(TestCase): """ Run before each test case. """ - self.dut_testpmd = PmdOutput(self.dut) - self.dut_testpmd.start_testpmd( + self.sut_testpmd = PmdOutput(self.sut_node) + self.sut_testpmd.start_testpmd( "Default", "--port-topology=chained --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE), @@ -47,7 +47,7 @@ class TestDynamicFlowtype(TestCase): """ Search max queue number from configuration. """ - out = self.dut.send_expect("cat config/rte_config.h", "]# ", 10) + out = self.sut_node.send_expect("cat config/rte_config.h", "]# ", 10) pattern = "define (%s) (\d*)" % Q_strip s = re.compile(pattern) res = s.search(out) @@ -64,17 +64,17 @@ class TestDynamicFlowtype(TestCase): profile will be stored in binary file and need to be passed to AQ to program Intel® Ethernet 700 Series during initialization stage. """ - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - out = self.dut_testpmd.execute_cmd("ddp get list 0") - self.dut_testpmd.execute_cmd("ddp add 0 /tmp/gtp.pkgo,/tmp/gtp.bak") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + out = self.sut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp add 0 /tmp/gtp.pkgo,/tmp/gtp.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify("Profile number is: 1" in out, "Failed to load ddp profile!!!") - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port start all") time.sleep(1) - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") def gtp_packets(self, flowtype=26, match_opt="matched"): """ @@ -194,9 +194,9 @@ class TestDynamicFlowtype(TestCase): 
""" pkts = self.gtp_packets(flowtype, match_opt) for packet_type in list(pkts.keys()): - pkt = packet.Packet(pkts[packet_type]) - pkt.send_pkt(crb=self.tester, tx_port=self.tester_intf) - out = self.dut.get_session_output(timeout=2) + scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder(pkts[packet_type]) + scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=self.tg_intf) + out = self.sut_node.get_session_output(timeout=2) if match_opt == "matched": self.verify( "RTE_MBUF_F_RX_RSS_HASH" in out, @@ -218,7 +218,7 @@ class TestDynamicFlowtype(TestCase): reset: If reset is true, reset the contents of flow type to pctype mapping. If reset is false, enable rss hash for new protocal. """ - out = self.dut_testpmd.execute_cmd("show port 0 pctype mapping") + out = self.sut_testpmd.execute_cmd("show port 0 pctype mapping") self.verify( "pctype: 63 -> flowtype: 14" in out, "Failed show flow type to pctype mapping!!!", @@ -227,19 +227,19 @@ class TestDynamicFlowtype(TestCase): "pctype: %s -> flowtype: %s" % (pctype, flowtype) not in out, "Failed show flow type to pctype mapping!!!", ) - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 pctype mapping update %s %s" % (pctype, flowtype) ) - out = self.dut_testpmd.execute_cmd("show port 0 pctype mapping") + out = self.sut_testpmd.execute_cmd("show port 0 pctype mapping") self.verify( "pctype: %s -> flowtype: %s" % (pctype, flowtype) in out, "Failed update flow type to pctype mapping!!!", ) if reset is False: - self.dut_testpmd.execute_cmd("port config all rss %s" % flowtype) + self.sut_testpmd.execute_cmd("port config all rss %s" % flowtype) else: - self.dut_testpmd.execute_cmd("port config 0 pctype mapping reset") - out = self.dut_testpmd.execute_cmd("show port 0 pctype mapping") + self.sut_testpmd.execute_cmd("port config 0 pctype mapping reset") + out = self.sut_testpmd.execute_cmd("show port 0 pctype mapping") self.verify( "pctype: %s -> flowtype: %s" % (pctype, flowtype) not in out, "Failed 
reset flow type to pctype mapping!!!", @@ -260,7 +260,7 @@ class TestDynamicFlowtype(TestCase): information correctness, includes used protocols, packet classification types, defined packet types and so on. """ - out = self.dut_testpmd.execute_cmd("ddp get info /tmp/gtp.pkgo") + out = self.sut_testpmd.execute_cmd("ddp get info /tmp/gtp.pkgo") self.verify( "i40e Profile Version" in out, "Failed to verify profile version!!!" ) @@ -312,18 +312,18 @@ class TestDynamicFlowtype(TestCase): self.dynamic_flowtype_test(pctype=25, flowtype=25, reset=False) def tear_down(self): - self.dut_testpmd.execute_cmd("stop") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("stop") + out = self.sut_testpmd.execute_cmd("ddp get list 0") if "Profile number is: 0" not in out: - self.dut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port stop all") time.sleep(1) - self.dut_testpmd.execute_cmd("ddp del 0 /tmp/gtp.bak") - out = self.dut_testpmd.execute_cmd("ddp get list 0") + self.sut_testpmd.execute_cmd("ddp del 0 /tmp/gtp.bak") + out = self.sut_testpmd.execute_cmd("ddp get list 0") self.verify( "Profile number is: 0" in out, "Failed to delete ddp profile!!!" 
) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.quit() + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.quit() def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_dynamic_queue.py b/tests/TestSuite_dynamic_queue.py index ebbe869c..cbd6469d 100644 --- a/tests/TestSuite_dynamic_queue.py +++ b/tests/TestSuite_dynamic_queue.py @@ -7,8 +7,8 @@ import re import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import get_nic_name from framework.test_case import TestCase @@ -17,42 +17,42 @@ test_loop = 3 class TestDynamicQueue(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - out = self.dut.send_expect("cat config/rte_config.h", "]# ", 10) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + out = self.sut_node.send_expect("cat config/rte_config.h", "]# ", 10) self.PF_Q_strip = "RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF" pattern = "define (%s) (\d*)" % self.PF_Q_strip self.PF_QUEUE = self.element_strip(out, pattern, True) - self.used_dut_port = self.dut_ports[0] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) - self.dut_testpmd = PmdOutput(self.dut) + self.used_sut_port = self.sut_ports[0] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) + self.sut_testpmd = PmdOutput(self.sut_node) def set_up(self): # Intel® Ethernet Converged Network Adapter XL710-QDA1 needs more cores to run properly if self.nic in ["I40E_40G-QSFP_A"]: self.verify( - "len(self.dut.cores)>=7", "Less than seven cores can't run testpmd" + "len(self.sut_node.cores)>=7", "Less than 
seven cores can't run testpmd" ) - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "all", "--port-topology=chained --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE), ) elif self.nic in ["cavium_a063", "cavium_a064"]: eal_opts = "" - for port in self.dut_ports: + for port in self.sut_ports: eal_opts += "-a %s,max_pools=256 " % ( - self.dut.get_port_pci(self.dut_ports[port]) + self.sut_node.get_port_pci(self.sut_ports[port]) ) - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "Default", "--port-topology=chained --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE), eal_param=eal_opts, ) else: - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "Default", "--port-topology=chained --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE), @@ -73,13 +73,13 @@ class TestDynamicQueue(TestCase): def send_packet(self): """ - Generate packets and send them to dut + Generate packets and send them to SUT """ - mac = self.dut.get_mac_address(0) + mac = self.sut_node.get_mac_address(0) pktnum = self.PF_QUEUE * 4 - pkt = Packet() - pkt.generate_random_pkts(mac, pktnum=pktnum, random_type=["IP_RAW"]) - pkt.send_pkt(self.tester, tx_port=self.tester_intf) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.generate_random_pkts(mac, pktnum=pktnum, random_type=["IP_RAW"]) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf) def rxq_setup_test(self, chgflag=0): """ @@ -91,13 +91,13 @@ class TestDynamicQueue(TestCase): queue = list() for i in range(test_loop): queue.append(random.randint(1, self.PF_QUEUE - 1)) - self.dut_testpmd.execute_cmd("port 0 rxq %d stop" % queue[i]) + self.sut_testpmd.execute_cmd("port 0 rxq %d stop" % queue[i]) - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("start") self.send_packet() - self.dut.get_session_output(timeout=10) - out = self.dut_testpmd.execute_cmd("stop") + 
self.sut_node.get_session_output(timeout=10) + out = self.sut_testpmd.execute_cmd("stop") # Check Rx stopped queues can't receive packets for i in range(test_loop): @@ -108,18 +108,18 @@ class TestDynamicQueue(TestCase): if chgflag == 1: for i in range(test_loop): - out = self.dut_testpmd.execute_cmd("show rxq info 0 %d" % queue[i]) + out = self.sut_testpmd.execute_cmd("show rxq info 0 %d" % queue[i]) qring_strip = "Number of RXDs: " pattern = "%s([0-9]+)" % qring_strip qringsize = self.element_strip(out, pattern) chg_qringsize = qringsize % 1024 + 256 if qringsize == 512: chg_qringsize = 256 - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 rxq %d ring_size %d" % (queue[i], chg_qringsize) ) - self.dut_testpmd.execute_cmd("port 0 rxq %d setup" % queue[i]) - out = self.dut_testpmd.execute_cmd("show rxq info 0 %d" % queue[i]) + self.sut_testpmd.execute_cmd("port 0 rxq %d setup" % queue[i]) + out = self.sut_testpmd.execute_cmd("show rxq info 0 %d" % queue[i]) chk_qringsize = self.element_strip(out, pattern) self.verify( chk_qringsize == chg_qringsize, @@ -128,13 +128,13 @@ class TestDynamicQueue(TestCase): for i in range(test_loop): if chgflag == 0: - self.dut_testpmd.execute_cmd("port 0 rxq %d setup" % queue[i]) - self.dut_testpmd.execute_cmd("port 0 rxq %d start" % queue[i]) + self.sut_testpmd.execute_cmd("port 0 rxq %d setup" % queue[i]) + self.sut_testpmd.execute_cmd("port 0 rxq %d start" % queue[i]) - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("start") self.send_packet() - self.dut.get_session_output(timeout=10) - out = self.dut_testpmd.execute_cmd("stop") + self.sut_node.get_session_output(timeout=10) + out = self.sut_testpmd.execute_cmd("stop") # Check Rx setup queues could receive packets for i in range(test_loop): @@ -152,15 +152,15 @@ class TestDynamicQueue(TestCase): """ for i in range(test_loop): queue = random.randint(1, self.PF_QUEUE - 1) - out = self.dut_testpmd.execute_cmd("show txq info 0 %d" % 
queue) + out = self.sut_testpmd.execute_cmd("show txq info 0 %d" % queue) qring_strip = "Number of TXDs: " pattern = "%s([0-9]+)" % qring_strip qringsize = self.element_strip(out, pattern) - self.dut_testpmd.execute_cmd("port 0 txq %d stop" % queue) - self.dut_testpmd.execute_cmd("set fwd txonly") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("port 0 txq %d stop" % queue) + self.sut_testpmd.execute_cmd("set fwd txonly") + self.sut_testpmd.execute_cmd("start") time.sleep(10) - out = self.dut_testpmd.execute_cmd("stop") + out = self.sut_testpmd.execute_cmd("stop") tx_num = qringsize - 1 if self.nic in ["cavium_a063", "cavium_a064"]: @@ -174,23 +174,23 @@ class TestDynamicQueue(TestCase): chg_qringsize = qringsize % 1024 + 256 if qringsize == 512: chg_qringsize = 256 - self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd( "port config 0 txq %d ring_size %d" % (queue, chg_qringsize) ) - self.dut_testpmd.execute_cmd("port 0 txq %d setup" % queue) - out = self.dut_testpmd.execute_cmd("show txq info 0 %d" % queue) + self.sut_testpmd.execute_cmd("port 0 txq %d setup" % queue) + out = self.sut_testpmd.execute_cmd("show txq info 0 %d" % queue) chk_qringsize = self.element_strip(out, pattern) self.verify( chk_qringsize == chg_qringsize, "Fail to change ring size at runtime!", ) if chgflag == 0: - self.dut_testpmd.execute_cmd("port 0 txq %d setup" % queue) + self.sut_testpmd.execute_cmd("port 0 txq %d setup" % queue) - self.dut_testpmd.execute_cmd("port 0 txq %d start" % queue) - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("port 0 txq %d start" % queue) + self.sut_testpmd.execute_cmd("start") time.sleep(10) - out = self.dut_testpmd.execute_cmd("stop") + out = self.sut_testpmd.execute_cmd("stop") """ Check Tx setup queue could transmit packets normally, not only qringsize-1 packets @@ -230,7 +230,7 @@ class TestDynamicQueue(TestCase): self.txq_setup_test(chgflag=1) def tear_down(self): - self.dut_testpmd.quit() + 
self.sut_testpmd.quit() def tear_down_all(self): pass diff --git a/tests/TestSuite_eeprom_dump.py b/tests/TestSuite_eeprom_dump.py index c9ffaf1c..eef924c3 100644 --- a/tests/TestSuite_eeprom_dump.py +++ b/tests/TestSuite_eeprom_dump.py @@ -19,9 +19,9 @@ class TestEEPROMDump(TestCase): """ Run at the start of each test suite. """ - self.ports = self.dut.get_ports() + self.ports = self.sut_node.get_ports() - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) def set_up(self): """ @@ -31,15 +31,15 @@ class TestEEPROMDump(TestCase): def clean_up_and_compare(self, testname, port): # comapre the two files - result = self.dut.send_expect( + result = self.sut_node.send_expect( f"diff testpmd_{testname}_{port}.txt ethtool_{testname}_{port}.txt", "#" ) # Clean up files - self.dut.send_expect(f"rm ethtool_{testname}_raw_{port}.txt", "#") - self.dut.send_expect(f"rm ethtool_{testname}_hex_{port}.txt", "#") - self.dut.send_expect(f"rm ethtool_{testname}_{port}.txt", "#") - self.dut.send_expect(f"rm testpmd_{testname}_{port}.txt", "#") + self.sut_node.send_expect(f"rm ethtool_{testname}_raw_{port}.txt", "#") + self.sut_node.send_expect(f"rm ethtool_{testname}_hex_{port}.txt", "#") + self.sut_node.send_expect(f"rm ethtool_{testname}_{port}.txt", "#") + self.sut_node.send_expect(f"rm testpmd_{testname}_{port}.txt", "#") self.verify(not result, "Testpmd dumped is not same as linux dumped") @@ -57,13 +57,13 @@ class TestEEPROMDump(TestCase): n = n + 1 if n <= count: line = line.replace(" ", "").lower() - self.dut.send_expect(f"echo {line} >> {to}", "#") + self.sut_node.send_expect(f"echo {line} >> {to}", "#") # Get testpmd output to have only hex value else: for line in re.findall(regex, get): line = line.replace(" ", "").lower() - self.dut.send_expect(f"echo {line} >> {to}", "#") + self.sut_node.send_expect(f"echo {line} >> {to}", "#") def check_output(self, testname, ethcommand): self.pmdout.start_testpmd("Default") @@ -71,7 +71,7 @@ class 
TestEEPROMDump(TestCase): for port in self.ports: # show port {port} eeprom has 10485760 bytes, and it takes about 13 minutes to show finish. - pmdout = self.dut.send_expect( + pmdout = self.sut_node.send_expect( f"show port {port} {testname}", "testpmd>", timeout=800 ) self.verify("Finish --" in pmdout, f"{testname} dump failed") @@ -83,11 +83,11 @@ class TestEEPROMDump(TestCase): portinfo = {"port": port, "length": length, "pmdout": pmdout} portsinfo.append(portinfo) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") # Bind to the default driver to use ethtool after quit testpmd for port in self.ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] portinfo = portsinfo[port] # strip original driver @@ -108,15 +108,15 @@ class TestEEPROMDump(TestCase): testname, ) - self.dut.send_expect( + self.sut_node.send_expect( f"ethtool {ethcommand} {iface} raw on length {portinfo['length']} >> ethtool_{testname}_raw_{port}.txt", "#", ) - self.dut.send_expect( + self.sut_node.send_expect( f"xxd ethtool_{testname}_raw_{port}.txt >> ethtool_{testname}_hex_{port}.txt", "#", ) - portinfo["ethout"] = self.dut.send_expect( + portinfo["ethout"] = self.sut_node.send_expect( f"cat ethtool_{testname}_hex_{port}.txt", "# ", trim_whitespace=False ) @@ -143,11 +143,11 @@ class TestEEPROMDump(TestCase): """ Run after each test case. """ - self.dut.kill_all() - self.dut.bind_interfaces_linux(self.drivername) + self.sut_node.kill_all() + self.sut_node.bind_interfaces_linux(self.drivername) def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_efd.py b/tests/TestSuite_efd.py index 9d94a3d4..2096012e 100644 --- a/tests/TestSuite_efd.py +++ b/tests/TestSuite_efd.py @@ -9,8 +9,8 @@ import os import re import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestEFD(TestCase): @@ -21,10 +21,10 @@ class TestEFD(TestCase): self.build_server_node_efd() - self.dut_ports = self.dut.get_ports() - self.node_app = self.dut.apps_name["node"] - self.server_app = self.dut.apps_name["server"] - self.app_test_path = self.dut.apps_name["test"] + self.sut_ports = self.sut_node.get_ports() + self.node_app = self.sut_node.apps_name["node"] + self.server_app = self.sut_node.apps_name["server"] + self.app_test_path = self.sut_node.apps_name["test"] # get dts output path if self.logger.log_path.startswith(os.sep): self.output_path = self.logger.log_path @@ -32,12 +32,12 @@ class TestEFD(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def build_server_node_efd(self): apps = ["node", "server"] for app in apps: - out = self.dut.build_dpdk_apps("./examples/server_node_efd/%s" % app) + out = self.sut_node.build_dpdk_apps("./examples/server_node_efd/%s" % app) self.verify("Error" not in out, "Compilation %s error" % app) self.verify("No such" not in out, "Compilation %s error" % app) @@ -51,28 +51,28 @@ class TestEFD(TestCase): """ Run EFD unit test """ - eal_para = self.dut.create_eal_parameters(cores=[0, 1, 2, 3]) - self.dut.send_expect("./%s %s" % (self.app_test_path, eal_para), "RTE>>", 60) - out = self.dut.send_expect("efd_autotest", "RTE>>", 120) - self.dut.send_expect("quit", "# ") 
+ eal_para = self.sut_node.create_eal_parameters(cores=[0, 1, 2, 3]) + self.sut_node.send_expect("./%s %s" % (self.app_test_path, eal_para), "RTE>>", 60) + out = self.sut_node.send_expect("efd_autotest", "RTE>>", 120) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_efd_unit_perf(self): """ Run EFD unit perf test """ - eal_para = self.dut.create_eal_parameters(cores=[0, 1, 2, 3]) - self.dut.send_expect("./%s %s" % (self.app_test_path, eal_para), "RTE>>", 60) - out = self.dut.send_expect("efd_perf_autotest", "RTE>>", 120) + eal_para = self.sut_node.create_eal_parameters(cores=[0, 1, 2, 3]) + self.sut_node.send_expect("./%s %s" % (self.app_test_path, eal_para), "RTE>>", 60) + out = self.sut_node.send_expect("efd_perf_autotest", "RTE>>", 120) self.logger.info(out) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_perf_efd_nodenum(self): """ Run EFD perf evaluation for number of nodes """ - self.verify(len(self.dut_ports) >= 2, "Not enough ports") + self.verify(len(self.sut_ports) >= 2, "Not enough ports") node_nums = [1, 2, 3, 4, 5, 6, 7, 8] flow_num = 1024 * 1024 * 2 @@ -95,8 +95,8 @@ class TestEFD(TestCase): self.logger.warning( "Millions of flow required huge memory, please allocate 16G hugepage" ) - self.dut.setup_memory_linux(hugepages=8192) - self.verify(len(self.dut_ports) >= 2, "Not enough ports") + self.sut_node.setup_memory_linux(hugepages=8192) + self.verify(len(self.sut_ports) >= 2, "Not enough ports") flow_nums = [ 1024 * 1024, 1024 * 1024 * 2, @@ -122,7 +122,7 @@ class TestEFD(TestCase): """ Run EFD perf evaluation for different value size """ - self.verify(len(self.dut_ports) >= 2, "Not enough ports") + self.verify(len(self.sut_ports) >= 2, "Not enough ports") val_bitnums = [8, 16, 24, 32] flow_num = 1024 * 1024 * 2 @@ -133,7 +133,7 @@ class TestEFD(TestCase): for val_bitnum in val_bitnums: # change value length and rebuild 
dpdk extra_options = "-Dc_args=-DRTE_EFD_VALUE_NUM_BITS=%d" % val_bitnum - self.dut.build_install_dpdk(self.target, extra_options=extra_options) + self.sut_node.build_install_dpdk(self.target, extra_options=extra_options) self.build_server_node_efd() pps = self._efd_perf_evaluate(2, flow_num) @@ -141,7 +141,7 @@ class TestEFD(TestCase): self.result_table_print() extra_options = "-Dc_args=-DRTE_EFD_VALUE_NUM_BITS=8" - self.dut.build_install_dpdk(self.target, extra_options=extra_options) + self.sut_node.build_install_dpdk(self.target, extra_options=extra_options) self.build_server_node_efd() def _efd_perf_evaluate(self, node_num, flow_num): @@ -150,27 +150,27 @@ class TestEFD(TestCase): # output port is calculated from overall ports number server_cmd_fmt = "%s %s -- -p 0x3 -n %d -f %s" node_cmd_fmt = "%s %s --proc-type=secondary -- -n %d" - socket = self.dut.get_numa_id(self.dut_ports[0]) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) pcap = os.sep.join([self.output_path, "efd.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether()/IP(src="0.0.0.0", dst="0.0.0.0")/("X"*26)])' % pcap ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[1]) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[1]) pcap = os.sep.join([self.output_path, "efd.pcap"]) tgen_input.append((tx_port, rx_port, pcap)) tgen_input.append((rx_port, tx_port, pcap)) - cores = self.dut.get_core_list("1S/%dC/1T" % (node_num + 2), socket) + cores = self.sut_node.get_core_list("1S/%dC/1T" % (node_num + 2), socket) self.verify(len(cores), "Can't find enough cores") - eal_para = self.dut.create_eal_parameters(cores=cores[0:2], ports=[0, 1]) + eal_para = self.sut_node.create_eal_parameters(cores=cores[0:2], ports=[0, 1]) server_cmd = server_cmd_fmt % ( self.server_app, 
eal_para, @@ -178,30 +178,30 @@ class TestEFD(TestCase): hex(flow_num), ) # create table may need few minutes - self.dut.send_expect(server_cmd, "Finished Process Init", timeout=240) + self.sut_node.send_expect(server_cmd, "Finished Process Init", timeout=240) node_sessions = [] for node in range(node_num): - eal_para = self.dut.create_eal_parameters(cores=[cores[2 + node]]) + eal_para = self.sut_node.create_eal_parameters(cores=[cores[2 + node]]) node_cmd = node_cmd_fmt % (self.node_app, eal_para, node) - node_session = self.dut.new_session(suite="node%d" % node) + node_session = self.sut_node.new_session(suite="node%d" % node) node_sessions.append(node_session) node_session.send_expect(node_cmd, "Finished Process Init", timeout=30) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) for node_session in node_sessions: node_session.send_expect("^C", "#") - self.dut.close_session(node_session) + self.sut_node.close_session(node_session) - self.dut.send_expect("^C", "#") + self.sut_node.send_expect("^C", "#") pps /= 1000000.0 return pps @@ -221,7 +221,7 @@ class TestEFD(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() pass def tear_down_all(self): diff --git a/tests/TestSuite_enable_package_download_in_ice_driver.py b/tests/TestSuite_enable_package_download_in_ice_driver.py index beda14a8..a5aef3fa 100644 --- a/tests/TestSuite_enable_package_download_in_ice_driver.py +++ b/tests/TestSuite_enable_package_download_in_ice_driver.py @@ -16,30 +16,30 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): self.nic in ["ICE_100G-E810C_QSFP", "ICE_25G-E810C_SFP"], "NIC Unsupported: " + str(self.nic), ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") self.PF_QUEUE = 16 - localPort0 = self.tester.get_local_port(self.dut_ports[0]) - localPort1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_p0 = self.tester.get_interface(localPort0) - self.tester_p1 = self.tester.get_interface(localPort1) - self.tester.send_expect("ifconfig %s -promisc" % self.tester_p0, "#") - self.tester.send_expect("ifconfig %s -promisc" % self.tester_p1, "#") + localPort0 = self.tg_node.get_local_port(self.sut_ports[0]) + localPort1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_p0 = self.tg_node.get_interface(localPort0) + self.tg_p1 = self.tg_node.get_interface(localPort1) + self.tg_node.send_expect("ifconfig %s -promisc" % self.tg_p0, "#") + self.tg_node.send_expect("ifconfig %s -promisc" % self.tg_p1, "#") - self.dut_p0_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.tester_p0_mac = self.tester.get_mac(localPort0) - self.dut_testpmd = PmdOutput(self.dut) - self.path = self.dut.apps_name["test-pmd"] + self.sut_p0_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.tg_p0_mac = self.tg_node.get_mac(localPort0) + self.sut_testpmd = PmdOutput(self.sut_node) + self.path = self.sut_node.apps_name["test-pmd"] self.pkg_file1 = 
"/lib/firmware/intel/ice/ddp/ice.pkg" self.pkg_file2 = "/lib/firmware/updates/intel/ice/ddp/ice.pkg" - out = self.dut.send_expect("ls %s" % self.pkg_file1, "#") + out = self.sut_node.send_expect("ls %s" % self.pkg_file1, "#") self.verify( "No such file or directory" not in out, "Cannot find %s, please check you system/driver." % self.pkg_file1, ) - out = self.dut.send_expect("ls %s" % self.pkg_file2, "#") + out = self.sut_node.send_expect("ls %s" % self.pkg_file2, "#") self.verify( "No such file or directory" not in out, "Cannot find %s, please check you system/driver." % self.pkg_file2, @@ -56,10 +56,10 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): """ backup_file = "/opt/ice.pkg_backup" if flag == "backup": - self.dut.send_expect("\cp %s %s" % (self.pkg_file1, backup_file), "#") + self.sut_node.send_expect("\cp %s %s" % (self.pkg_file1, backup_file), "#") else: - self.dut.send_expect("\cp %s %s" % (backup_file, self.pkg_file1), "#") - self.dut.send_expect("\cp %s %s" % (backup_file, self.pkg_file2), "#") + self.sut_node.send_expect("\cp %s %s" % (backup_file, self.pkg_file1), "#") + self.sut_node.send_expect("\cp %s %s" % (backup_file, self.pkg_file2), "#") def use_correct_ice_pkg(self, flag="true"): """ @@ -69,20 +69,20 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): if flag == "true": self.backup_recover_ice_pkg("recover") else: - self.dut.send_expect("rm -rf %s" % self.pkg_file1, "#") - self.dut.send_expect("touch %s" % self.pkg_file1, "#") - self.dut.send_expect("rm -rf %s" % self.pkg_file2, "#") - self.dut.send_expect("touch %s" % self.pkg_file2, "#") + self.sut_node.send_expect("rm -rf %s" % self.pkg_file1, "#") + self.sut_node.send_expect("touch %s" % self.pkg_file1, "#") + self.sut_node.send_expect("rm -rf %s" % self.pkg_file2, "#") + self.sut_node.send_expect("touch %s" % self.pkg_file2, "#") def start_testpmd(self, ice_pkg="true", safe_mode_support="false"): self.eal_param = "" if safe_mode_support == "true": - for i in 
range(len(self.dut_ports)): + for i in range(len(self.sut_ports)): self.eal_param = ( self.eal_param - + "-a %s,safe-mode-support=1 " % self.dut.ports_info[i]["pci"] + + "-a %s,safe-mode-support=1 " % self.sut_node.ports_info[i]["pci"] ) - out = self.dut_testpmd.start_testpmd( + out = self.sut_testpmd.start_testpmd( "all", "--nb-cores=8 --rxq=%s --txq=%s --port-topology=chained" % (self.PF_QUEUE, self.PF_QUEUE), @@ -105,13 +105,13 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): error_message in out, "There should be error messages in out: %s" % out, ) - self.dut_testpmd.execute_cmd("set promisc all off") - self.dut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("set promisc all off") + self.sut_testpmd.execute_cmd("set verbose 1") def tcpdump_start_sniffing(self, ifaces=[]): """ - Starts tcpdump in the background to sniff the tester interface where - the packets are transmitted to and from the self.dut. + Starts tcpdump in the background to sniff the TG interface where + the packets are transmitted to and from the self.sut_node. All the captured packets are going to be stored in a file for a post-analysis. """ @@ -121,30 +121,30 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): iface ) del_cmd = ("rm -f tcpdump_{0}.pcap").format(iface) - self.tester.send_expect(del_cmd, "#") - self.tester.send_expect(command, "#") + self.tg_node.send_expect(del_cmd, "#") + self.tg_node.send_expect(command, "#") def tcpdump_stop_sniff(self): """ Stops the tcpdump process running in the background. 
""" - self.tester.send_expect("killall tcpdump", "#") + self.tg_node.send_expect("killall tcpdump", "#") time.sleep(1) - self.tester.send_expect('echo "Cleaning buffer"', "#") + self.tg_node.send_expect('echo "Cleaning buffer"', "#") time.sleep(1) def tcpdump_command(self, command): """ Sends a tcpdump related command and returns an integer from the output """ - result = self.tester.send_expect(command, "#") + result = self.tg_node.send_expect(command, "#") print(result) return int(result.strip()) def number_of_packets(self, iface): """ By reading the file generated by tcpdump it counts how many packets were - forwarded by the sample app and received in the self.tester. The sample app + forwarded by the sample app and received in the self.tg_node. The sample app will add a known MAC address for the test to look for. """ command = ( @@ -157,7 +157,7 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): """ Execute scanner to return results """ - scanner_result = self.tester.send_expect(scanner, "#") + scanner_result = self.tg_node.send_expect(scanner, "#") fially_result = re.findall(r"length( \d+)", scanner_result) return list(fially_result) @@ -166,114 +166,114 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): Sends packets. 
""" self.loading_size = 30 - self.tester.scapy_foreground() - self.tester.scapy_append('sys.path.append("./")') - self.tester.scapy_append("from sctp import *") + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('sys.path.append("./")') + self.tg_node.scapy_append("from sctp import *") if tran_type == "ipv4-other": for i in range(1): packet = ( r'sendp([Ether(dst="%s", src="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/("X"*%s)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, self.loading_size, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-tcp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/TCP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-udp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/UDP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-sctp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/SCTP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - 
self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-tcp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IPv6(src="::%d", dst="::%d")/TCP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-udp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IPv6(src="::%d", dst="::%d")/UDP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-sctp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IPv6(src="::%d", dst="::%d",nh=132)/SCTP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) else: print("\ntran_type error!\n") @@ -287,11 +287,11 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): """ if tran_type == "ipv4-other": self.tcpdump_stop_sniff() - p0_stats = self.number_of_packets(self.tester_p0) - p1_stats = self.number_of_packets(self.tester_p1) - self.verify(p0_stats == p1_stats, "tester p0 and p1: packet number match") + p0_stats = self.number_of_packets(self.tg_p0) + p1_stats = self.number_of_packets(self.tg_p1) + self.verify(p0_stats == p1_stats, "TG p0 and 
p1: packet number match") else: - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() queue_list = [] lines = out.split("\r\n") for line in lines: @@ -325,14 +325,14 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): self.use_correct_ice_pkg(ice_pkg) self.start_testpmd(ice_pkg, safe_mode_support) - self.dut_testpmd.execute_cmd("set fwd mac") - self.dut_testpmd.execute_cmd("start") - self.tcpdump_start_sniffing([self.tester_p0, self.tester_p1]) + self.sut_testpmd.execute_cmd("set fwd mac") + self.sut_testpmd.execute_cmd("start") + self.tcpdump_start_sniffing([self.tg_p0, self.tg_p1]) self.send_packet(tran_type="ipv4-other", flag=ice_pkg) - self.dut_testpmd.execute_cmd("stop") - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("stop") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("start") for tran_types in [ "ipv4-tcp", "ipv4-udp", @@ -366,7 +366,7 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): + "-c 0x7 -n 4 -- -i --nb-cores=8 --rxq=%s --txq=%s --port-topology=chained" % (self.PF_QUEUE, self.PF_QUEUE) ) - out = self.dut.send_expect(cmd, "#", 60) + out = self.sut_node.send_expect(cmd, "#", 60) error_messages = [ "ice_load_pkg(): ice_copy_and_init_hw failed: -1", "ice_dev_init(): Failed to load the DDP package,Use safe-mode-support=1 to enter Safe Mode", @@ -379,17 +379,17 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): def get_sn(self, nic_pci): cmd = "lspci -vs %s | grep 'Device Serial Number'" % nic_pci - out = self.dut.send_expect(cmd, "#") + out = self.sut_node.send_expect(cmd, "#") sn_temp = re.findall(r"Device Serial Number (.*)", out) sn = re.sub("-", "", sn_temp[0]) return sn def check_env(self): """ - Check the DUT has two or more Intel® Ethernet 800 Series NICs. If not, return + Check the SUT has two or more Intel® Ethernet 800 Series NICs. 
If not, return "the case needs >=2 Intel® Ethernet 800 Series NICs with different Serial Numbers" """ - self.nic_pci = [self.dut.ports_info[0]["pci"], self.dut.ports_info[-1]["pci"]] + self.nic_pci = [self.sut_node.ports_info[0]["pci"], self.sut_node.ports_info[-1]["pci"]] self.nic_sn = [self.get_sn(self.nic_pci[0]), self.get_sn(self.nic_pci[1])] self.verify( self.nic_sn[0] != self.nic_sn[1], @@ -398,18 +398,18 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): def copy_specify_ice_pkg(self, pkg_ver): """ - Copy 2 different ``ice-xxx.pkg`` from dts/dep to dut /tmp/ + Copy 2 different ``ice-xxx.pkg`` from dts/dep to SUT /tmp/ pkg_files = ['ice-1.3.4.0.pkg', 'ice-1.3.10.0.pkg'] """ dst = "/tmp" pkg_file = "ice-%s.pkg" % pkg_ver src_file = r"./dep/%s" % pkg_file - self.dut.session.copy_file_to(src_file, dst) + self.sut_node.session.copy_file_to(src_file, dst) def generate_delete_specify_pkg(self, pkg_ver, sn, key="true"): - self.dut.send_expect("rm -rf /lib/firmware/intel/ice/ddp/ice-%s.pkg" % sn, "#") + self.sut_node.send_expect("rm -rf /lib/firmware/intel/ice/ddp/ice-%s.pkg" % sn, "#") if key == "true": - self.dut.send_expect( + self.sut_node.send_expect( "\cp /tmp/ice-%s.pkg /lib/firmware/intel/ice/ddp/ice-%s.pkg" % (pkg_ver, sn), "#", @@ -434,8 +434,8 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): eal_param = ( "-a %s " % self.nic_pci[0] + "-a %s " % self.nic_pci[1] + "--log-level=8" ) - out = self.dut_testpmd.execute_cmd(self.path + eal_param + " -- -i ") - self.dut_testpmd.quit() + out = self.sut_testpmd.execute_cmd(self.path + eal_param + " -- -i ") + self.sut_testpmd.quit() # Delete ice-.pkg to recover the ENV for i in range(len(self.new_pkgs)): @@ -451,7 +451,7 @@ class TestEnable_Package_Download_In_Ice_Driver(TestCase): ) def tear_down(self): - self.dut_testpmd.quit() + self.sut_testpmd.quit() def tear_down_all(self): """ diff --git a/tests/TestSuite_ethtool_stats.py b/tests/TestSuite_ethtool_stats.py index abaf05a3..a529caee 
100644 --- a/tests/TestSuite_ethtool_stats.py +++ b/tests/TestSuite_ethtool_stats.py @@ -16,8 +16,8 @@ from functools import reduce from scapy.sendrecv import sendp from framework.exception import VerifyFailure -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase from framework.utils import create_mask as dts_create_mask @@ -28,28 +28,28 @@ class TestEthtoolStats(TestCase): def target_dir(self): # get absolute directory of target source code target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir def d_a_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - output = self.dut.alt_session.send_expect(*_cmd) - output2 = self.dut.alt_session.session.get_session_before(1) + output = self.sut_node.alt_session.send_expect(*_cmd) + output2 = self.sut_node.alt_session.session.get_session_before(1) return output + os.linesep + output2 def send_packet(self, pkt_config, src_intf): for pkt_type in list(pkt_config.keys()): - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) # set packet every layer's input parameters if "layer_configs" in list(pkt_config[pkt_type].keys()): pkt_configs = pkt_config[pkt_type]["layer_configs"] if pkt_configs: for layer in list(pkt_configs.keys()): - pkt.config_layer(layer, pkt_configs[layer]) - pkt.send_pkt(crb=self.tester, tx_port=src_intf, count=1) + scapy_pkt_builder.config_layer(layer, pkt_configs[layer]) + scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=src_intf, count=1) time.sleep(1) def traffic(self): @@ -69,7 +69,7 @@ class TestEthtoolStats(TestCase): self.send_packet(pkt_config, src_intf) def init_testpmd(self): - 
self.testpmd = PmdOutput(self.dut) + self.testpmd = PmdOutput(self.sut_node) self.is_pmd_on = False def start_testpmd(self): @@ -126,9 +126,9 @@ class TestEthtoolStats(TestCase): time.sleep(1) def init_proc_info(self): - ports_count = len(self.dut_ports) + ports_count = len(self.sut_ports) ports_mask = reduce(lambda x, y: x | y, [0x1 << x for x in range(ports_count)]) - app_name = self.dut.apps_name["proc-info"].split("/")[-1] + app_name = self.sut_node.apps_name["proc-info"].split("/")[-1] self.query_tool = os.path.join( self.target_dir, self.target, @@ -439,10 +439,10 @@ class TestEthtoolStats(TestCase): self.is_pmd_on = None # get link port pairs port_num = 0 - local_port = self.tester.get_local_port(port_num) + local_port = self.tg_node.get_local_port(port_num) self.link_topo = [ - self.tester.get_interface(local_port), - self.tester.get_mac(local_port), + self.tg_node.get_interface(local_port), + self.tg_node.get_mac(local_port), ] # set packet sizes for testing different type self.frame_sizes = [64, 72, 128, 256, 512, 1024] @@ -455,9 +455,9 @@ class TestEthtoolStats(TestCase): # def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.prefix = "dpdk_" + self.dut.prefix_subfix + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.prefix = "dpdk_" + self.sut_node.prefix_subfix self.preset_test_environment() def set_up(self): diff --git a/tests/TestSuite_eventdev_perf.py b/tests/TestSuite_eventdev_perf.py index 198b4553..6e814858 100644 --- a/tests/TestSuite_eventdev_perf.py +++ b/tests/TestSuite_eventdev_perf.py @@ -15,10 +15,10 @@ from time import sleep import framework.utils as utils import nics.perf_report as perf_report -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import FOLDERS, HEADER_SIZE from framework.test_case import TestCase +from 
framework.tg_perf import TrafficGeneratorStream from nics.system_info import SystemInfo @@ -54,33 +54,33 @@ class TestEventdevPerf(TestCase): self.blocklist = "" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports() - if self.dut.get_os_type() == "linux": - # Get dut system information - port_num = self.dut_ports[0] - pci_device_id = self.dut.ports_info[port_num]["pci"] - ori_driver = self.dut.ports_info[port_num]["port"].get_nic_driver() - self.dut.ports_info[port_num]["port"].bind_driver() + self.sut_ports = self.sut_node.get_ports() + if self.sut_node.get_os_type() == "linux": + # Get SUT system information + port_num = self.sut_ports[0] + pci_device_id = self.sut_node.ports_info[port_num]["pci"] + ori_driver = self.sut_node.ports_info[port_num]["port"].get_nic_driver() + self.sut_node.ports_info[port_num]["port"].bind_driver() - self.dut.ports_info[port_num]["port"].bind_driver(ori_driver) + self.sut_node.ports_info[port_num]["port"].bind_driver(ori_driver) if self.nic == "cavium_a063": self.eventdev_device_bus_id = "0002:0e:00.0" self.eventdev_device_id = "a0f9" #### Bind evendev device #### - self.dut.bind_eventdev_port(port_to_bind=self.eventdev_device_bus_id) + self.sut_node.bind_eventdev_port(port_to_bind=self.eventdev_device_bus_id) #### Configuring evendev SS0 & SSOw limits #### - self.dut.set_eventdev_port_limits( + self.sut_node.set_eventdev_port_limits( self.eventdev_device_id, self.eventdev_device_bus_id ) self.headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["tcp"] - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.build_eventdev_app() @@ -90,19 +90,19 @@ class TestEventdevPerf(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) - 
self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def suite_measure_throughput(self, tgen_input, rate_percent, delay): streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, rate_percent, None, self.tester.pktgen + tgen_input, rate_percent, None, self.tg_node.perf_tg ) - result = self.tester.pktgen.measure_throughput(stream_ids=streams) + result = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) return result def build_eventdev_app(self): - self.app_command = self.dut.apps_name["eventdev_pipeline"] - out = self.dut.build_dpdk_apps("examples/eventdev_pipeline") + self.app_command = self.sut_node.apps_name["eventdev_pipeline"] + out = self.sut_node.build_dpdk_apps("examples/eventdev_pipeline") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -117,7 +117,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids = [] command_line1 = self.app_command + " -l %s -a %s" for i in range(0, nports): - self.Port_pci_ids.append(self.dut.ports_info[i]["pci"]) + self.Port_pci_ids.append(self.sut_node.ports_info[i]["pci"]) ## Adding core-list and pci-ids command_line1 = command_line1 + " -a %s " ## Adding test and stage types @@ -133,19 +133,19 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test" + len(self.sut_ports) >= 1, "Insufficient ports for 1 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), 
os.sep.join([self.output_path, "event_test.pcap"]), ) ) @@ -154,7 +154,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -172,7 +172,7 @@ class TestEventdevPerf(TestCase): self.eventdev_device_bus_id, self.Port_pci_ids[0], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -186,11 +186,11 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -199,7 +199,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -227,19 +227,19 @@ class TestEventdevPerf(TestCase): Evendev Performance Benchmarking with 1 ports with test_type=pipeline_atq and schedule_type=parallel. 
""" self.verify( - len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test" + len(self.sut_ports) >= 1, "Insufficient ports for 1 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test.pcap"]), ) ) @@ -248,7 +248,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -266,7 +266,7 @@ class TestEventdevPerf(TestCase): self.eventdev_device_bus_id, self.Port_pci_ids[0], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -280,11 +280,11 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -293,7 +293,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - 
self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -322,19 +322,19 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test" + len(self.sut_ports) >= 1, "Insufficient ports for 1 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test.pcap"]), ) ) @@ -343,7 +343,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -361,7 +361,7 @@ class TestEventdevPerf(TestCase): self.eventdev_device_bus_id, self.Port_pci_ids[0], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -375,11 +375,11 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = 
self.suite_measure_throughput(tgen_input, 100, 60) @@ -388,7 +388,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -417,19 +417,19 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test" + len(self.sut_ports) >= 1, "Insufficient ports for 1 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test.pcap"]), ) ) @@ -438,7 +438,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -456,7 +456,7 @@ class TestEventdevPerf(TestCase): self.eventdev_device_bus_id, self.Port_pci_ids[0], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -470,11 +470,11 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", 
[Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -483,7 +483,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -512,19 +512,19 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test" + len(self.sut_ports) >= 1, "Insufficient ports for 1 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test.pcap"]), ) ) @@ -533,7 +533,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -551,7 +551,7 @@ class TestEventdevPerf(TestCase): self.eventdev_device_bus_id, self.Port_pci_ids[0], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -565,11 +565,11 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % 
frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -578,7 +578,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -607,19 +607,19 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test" + len(self.sut_ports) >= 1, "Insufficient ports for 1 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test.pcap"]), ) ) @@ -628,7 +628,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -646,7 +646,7 @@ class TestEventdevPerf(TestCase): self.eventdev_device_bus_id, self.Port_pci_ids[0], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info 
= "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -660,11 +660,11 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -673,7 +673,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -702,26 +702,26 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) @@ -730,7 +730,7 @@ class 
TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -749,7 +749,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[0], self.Port_pci_ids[1], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -762,21 +762,21 @@ class TestEventdevPerf(TestCase): # create pcap file self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % ( os.sep.join([self.output_path, "event_test1.pcap"]), payload_size, ) ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % ( os.sep.join([self.output_path, "event_test2.pcap"]), payload_size, ) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -785,7 +785,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -814,26 +814,26 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + 
all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) @@ -842,7 +842,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -861,7 +861,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[0], self.Port_pci_ids[1], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -875,16 +875,16 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = 
self.suite_measure_throughput(tgen_input, 100, 60) @@ -893,7 +893,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -922,26 +922,26 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) @@ -950,7 +950,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -969,7 +969,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[0], self.Port_pci_ids[1], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -983,16 +983,16 @@ class 
TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -1001,7 +1001,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -1030,26 +1030,26 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + 
self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) @@ -1058,7 +1058,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -1077,7 +1077,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[0], self.Port_pci_ids[1], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -1091,16 +1091,16 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -1109,7 +1109,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -1138,26 +1138,26 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test" ) 
self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) @@ -1166,7 +1166,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -1185,7 +1185,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[0], self.Port_pci_ids[1], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -1199,16 +1199,16 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", 
[Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -1217,7 +1217,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -1246,26 +1246,26 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) @@ -1274,7 +1274,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -1293,7 +1293,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[0], self.Port_pci_ids[1], ) - 
self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -1307,16 +1307,16 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -1325,7 +1325,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -1354,40 +1354,40 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 4 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 4 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), 
os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test3.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[3]), - self.tester.get_local_port(self.dut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), os.sep.join([self.output_path, "event_test4.pcap"]), ) ) @@ -1396,7 +1396,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -1417,7 +1417,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[2], self.Port_pci_ids[3], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -1431,26 +1431,26 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 
'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test3.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:02")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test4.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:03")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -1459,7 +1459,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -1487,40 +1487,40 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test" + len(self.sut_ports) >= 4, "Insufficient ports for 4 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + 
self.tg_node.get_local_port(self.sut_ports[3]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test3.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[3]), - self.tester.get_local_port(self.dut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), os.sep.join([self.output_path, "event_test4.pcap"]), ) ) @@ -1529,7 +1529,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -1550,7 +1550,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[2], self.Port_pci_ids[3], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -1564,26 +1564,26 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test3.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 
'wrpcap("%s", [Ether(src="52:00:00:00:00:02")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test4.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:03")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -1592,7 +1592,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -1621,40 +1621,40 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test" + len(self.sut_ports) >= 4, "Insufficient ports for 4 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + 
self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test3.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[3]), - self.tester.get_local_port(self.dut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), os.sep.join([self.output_path, "event_test4.pcap"]), ) ) @@ -1663,7 +1663,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -1684,7 +1684,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[2], self.Port_pci_ids[3], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -1698,26 +1698,26 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test3.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:02")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test4.pcap"]) - self.tester.scapy_append( + 
self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:03")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -1726,7 +1726,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -1755,40 +1755,40 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test" + len(self.sut_ports) >= 4, "Insufficient ports for 4 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test3.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[3]), - 
self.tester.get_local_port(self.dut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), os.sep.join([self.output_path, "event_test4.pcap"]), ) ) @@ -1797,7 +1797,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -1818,7 +1818,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[2], self.Port_pci_ids[3], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -1832,26 +1832,26 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test3.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:02")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test4.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:03")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = 
self.suite_measure_throughput(tgen_input, 100, 60) @@ -1860,7 +1860,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -1889,40 +1889,40 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test" + len(self.sut_ports) >= 4, "Insufficient ports for 4 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test3.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[3]), - self.tester.get_local_port(self.dut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), os.sep.join([self.output_path, "event_test4.pcap"]), ) ) @@ -1930,7 +1930,7 @@ class TestEventdevPerf(TestCase): for 
test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -1951,7 +1951,7 @@ class TestEventdevPerf(TestCase): self.Port_pci_ids[2], self.Port_pci_ids[3], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -1965,26 +1965,26 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test3.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:02")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test4.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:03")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -1993,7 +1993,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + 
self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -2022,40 +2022,40 @@ class TestEventdevPerf(TestCase): """ self.verify( - len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test" + len(self.sut_ports) >= 4, "Insufficient ports for 4 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test3.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[3]), - self.tester.get_local_port(self.dut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), os.sep.join([self.output_path, "event_test4.pcap"]), ) ) @@ -2064,7 +2064,7 @@ class TestEventdevPerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -2085,7 +2085,7 @@ class 
TestEventdevPerf(TestCase): self.Port_pci_ids[2], self.Port_pci_ids[3], ) - self.dut.send_expect(command_line, "Configured", 100) + self.sut_node.send_expect(command_line, "Configured", 100) info = "Executing Eventdev using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -2099,26 +2099,26 @@ class TestEventdevPerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test3.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:02")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test4.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:03")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -2127,7 +2127,7 @@ class TestEventdevPerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -2159,6 +2159,6 @@ class TestEventdevPerf(TestCase): """ Run after each test suite. 
""" - self.dut.send_expect("^C", "# ", 50) - self.dut.unbind_eventdev_port(port_to_unbind=self.eventdev_device_bus_id) - self.dut.kill_all() + self.sut_node.send_expect("^C", "# ", 50) + self.sut_node.unbind_eventdev_port(port_to_unbind=self.eventdev_device_bus_id) + self.sut_node.kill_all() diff --git a/tests/TestSuite_eventdev_pipeline.py b/tests/TestSuite_eventdev_pipeline.py index efd6605a..f81de864 100644 --- a/tests/TestSuite_eventdev_pipeline.py +++ b/tests/TestSuite_eventdev_pipeline.py @@ -14,7 +14,7 @@ import scapy.layers.inet from scapy.utils import rdpcap import framework.utils as utils -from framework.packet import Packet +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -28,11 +28,11 @@ class TestEventdevPipeline(TestCase): self.core_config = "1S/8C/1T" self.build_eventdev_app() - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.verify(len(self.core_list) >= 8, "sever no enough cores to run this suite") @@ -47,11 +47,11 @@ class TestEventdevPipeline(TestCase): self.taskset_core_list = ",".join(self.core_list) - self.rx_port = self.tester.get_local_port(self.dut_ports[0]) + self.rx_port = self.tg_node.get_local_port(self.sut_ports[0]) self.tx_port = self.rx_port - self.rx_interface = self.tester.get_interface(self.rx_port) - self.tx_interface = self.tester.get_interface(self.tx_port) - self.d_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.rx_interface = self.tg_node.get_interface(self.rx_port) + self.tx_interface = 
self.tg_node.get_interface(self.tx_port) + self.d_mac = self.sut_node.get_mac_address(self.sut_ports[0]) def set_up(self): """ @@ -60,8 +60,8 @@ class TestEventdevPipeline(TestCase): pass def build_eventdev_app(self): - self.app_command = self.dut.apps_name["eventdev_pipeline"] - out = self.dut.build_dpdk_apps("examples/eventdev_pipeline") + self.app_command = self.sut_node.apps_name["eventdev_pipeline"] + out = self.sut_node.build_dpdk_apps("examples/eventdev_pipeline") # self.verify('make: Leaving directory' in out, "Compilation failed") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -70,8 +70,8 @@ class TestEventdevPipeline(TestCase): """ run eventdev_pipeline command """ - eal_params = self.dut.create_eal_parameters( - cores=self.core_list, ports=[self.dut.ports_info[0]["pci"]] + eal_params = self.sut_node.create_eal_parameters( + cores=self.core_list, ports=[self.sut_node.ports_info[0]["pci"]] ) command_line = ( "taskset -c %s " @@ -88,23 +88,23 @@ class TestEventdevPipeline(TestCase): self.core_mask_wk, cmd_type, ) - self.dut.send_expect(command_line, "Port 0", 30) + self.sut_node.send_expect(command_line, "Port 0", 30) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.verify("executing NIC Rx" in out, "lcore of rx not right") self.verify("executing NIC Tx" in out, "lcore of tx not right") self.verify("executing scheduler" in out, "lcore of scheduler not right") self.verify("executing worker" in out, "lcore of worker not right") def remove_dhcp_from_revpackets(self, inst, timeout=3): - pkts = self.tester.load_tcpdump_sniff_packets(inst, timeout) + scapy_pkts = self.tg_node.load_tcpdump_sniff_packets(inst, timeout) i = 0 - while len(pkts) != 0 and i <= len(pkts) - 1: - if pkts[i].haslayer("DHCP"): - pkts.pktgen.pkts.pop(i) + while len(scapy_pkts) != 0 and i <= len(scapy_pkts) - 1: + if scapy_pkts[i].haslayer("DHCP"): + 
scapy_pkts.scapy_pkt_util.pkts.pop(i) i = i - 1 i = i + 1 - return pkts + return scapy_pkts def send_ordered_packet(self, count=1): """ @@ -116,7 +116,7 @@ class TestEventdevPipeline(TestCase): if has eight flow, the pcap has 8 couples with diff 5 tuple, and each couple load info from 000001 to 000012 """ - pkt = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for queue in range(self.queues): config_opt = [ ("ether", {"dst": self.d_mac, "src": self.s_mac, "src": self.s_mac}), @@ -127,7 +127,7 @@ class TestEventdevPipeline(TestCase): # if multi queue, create self.packet_num with diff 5 tuple, # each tuple have (self.packet_num//self.queues) pkts pkt_num = self.packet_num // self.queues - pkt.generate_random_pkts( + scapy_pkt_builder.generate_random_pkts( pktnum=pkt_num, random_type=["UDP"], ip_increase=False, @@ -137,12 +137,12 @@ class TestEventdevPipeline(TestCase): # config raw info in pkts for i in range(pkt_num): payload = "0000%.2d" % (i + 1) - pkt.pktgen.pkts[i + pkt_num * queue]["Raw"].load = payload + scapy_pkt_builder.scapy_pkt_util.pkts[i + pkt_num * queue]["Raw"].load = payload filt = [{"layer": "ether", "config": {"src": "%s" % self.s_mac}}] - inst = self.tester.tcpdump_sniff_packets(self.rx_interface, filters=filt) - pkt.send_pkt( - crb=self.tester, tx_port=self.tx_interface, count=count, timeout=300 + inst = self.tg_node.tcpdump_sniff_packets(self.rx_interface, filters=filt) + scapy_pkt_builder.send_pkt( + node=self.tg_node, tx_port=self.tx_interface, count=count, timeout=300 ) self.pkts = self.remove_dhcp_from_revpackets(inst) @@ -155,8 +155,8 @@ class TestEventdevPipeline(TestCase): self.send_ordered_packet(count=100) # exit the eventdev_pipeline app # and get the output info - self.dut.send_expect("^c", "Signal") - out = self.dut.get_session_output(timeout=3) + self.sut_node.send_expect("^c", "Signal") + out = self.sut_node.get_session_output(timeout=3) work_rx = [] for wk in self.core_list_wk: one_info = re.search( @@ -299,7 +299,7 @@ class 
TestEventdevPipeline(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(5) def tear_down_all(self): diff --git a/tests/TestSuite_eventdev_pipeline_perf.py b/tests/TestSuite_eventdev_pipeline_perf.py index 1765e9fa..ba4f51d4 100644 --- a/tests/TestSuite_eventdev_pipeline_perf.py +++ b/tests/TestSuite_eventdev_pipeline_perf.py @@ -15,10 +15,10 @@ from time import sleep import framework.utils as utils import nics.perf_report as perf_report -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import FOLDERS, HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from nics.system_info import SystemInfo @@ -54,33 +54,33 @@ class TestEventdevPipelinePerf(TestCase): self.blocklist = "" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports() - if self.dut.get_os_type() == "linux": - # Get dut system information - port_num = self.dut_ports[0] - pci_device_id = self.dut.ports_info[port_num]["pci"] - ori_driver = self.dut.ports_info[port_num]["port"].get_nic_driver() - self.dut.ports_info[port_num]["port"].bind_driver() + self.sut_ports = self.sut_node.get_ports() + if self.sut_node.get_os_type() == "linux": + # Get SUT system information + port_num = self.sut_ports[0] + pci_device_id = self.sut_node.ports_info[port_num]["pci"] + ori_driver = self.sut_node.ports_info[port_num]["port"].get_nic_driver() + self.sut_node.ports_info[port_num]["port"].bind_driver() - self.dut.ports_info[port_num]["port"].bind_driver(ori_driver) + self.sut_node.ports_info[port_num]["port"].bind_driver(ori_driver) if self.nic == "cavium_a063": self.eventdev_device_bus_id = "0002:0e:00.0" self.eventdev_device_id = "a0f9" #### Bind evendev device #### - self.dut.bind_eventdev_port(port_to_bind=self.eventdev_device_bus_id) + self.sut_node.bind_eventdev_port(port_to_bind=self.eventdev_device_bus_id) 
#### Configuring evendev SS0 & SSOw limits #### - self.dut.set_eventdev_port_limits( + self.sut_node.set_eventdev_port_limits( self.eventdev_device_id, self.eventdev_device_bus_id ) self.headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["tcp"] - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.build_eventdev_app() @@ -90,19 +90,19 @@ class TestEventdevPipelinePerf(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def suite_measure_throughput(self, tgen_input, rate_percent, delay): streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, rate_percent, None, self.tester.pktgen + tgen_input, rate_percent, None, self.tg_node.perf_tg ) - result = self.tester.pktgen.measure_throughput(stream_ids=streams) + result = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) return result def build_eventdev_app(self): - self.app_command = self.dut.apps_name["eventdev_pipeline"] - out = self.dut.build_dpdk_apps("examples/eventdev_pipeline") + self.app_command = self.sut_node.apps_name["eventdev_pipeline"] + out = self.sut_node.build_dpdk_apps("examples/eventdev_pipeline") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -117,7 +117,7 @@ class TestEventdevPipelinePerf(TestCase): self.Port_pci_ids = [] command_line1 = self.app_command + " -c %s -a %s" for i in range(0, nports): - self.Port_pci_ids.append(self.dut.ports_info[i]["pci"]) + self.Port_pci_ids.append(self.sut_node.ports_info[i]["pci"]) ## Adding core-list and pci-ids command_line1 = command_line1 + " -a %s " ## Adding test and stage types @@ -129,19 +129,19 @@ class 
TestEventdevPipelinePerf(TestCase): Evendev_Pipeline Performance Benchmarking with 1 ports. """ self.verify( - len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test" + len(self.sut_ports) >= 1, "Insufficient ports for 1 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test.pcap"]), ) ) @@ -150,7 +150,7 @@ class TestEventdevPipelinePerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -165,7 +165,7 @@ class TestEventdevPipelinePerf(TestCase): self.eventdev_device_bus_id, self.Port_pci_ids[0], ) - self.dut.send_expect(command_line, "eventdev port 0", 100) + self.sut_node.send_expect(command_line, "eventdev port 0", 100) info = "Executing Eventdev_pipeline using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -179,11 +179,11 @@ class TestEventdevPipelinePerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -192,7 +192,7 @@ class 
TestEventdevPipelinePerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -220,19 +220,19 @@ class TestEventdevPipelinePerf(TestCase): Evendev_Pipeline Performance Benchmarking with 1 ports. """ self.verify( - len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test" + len(self.sut_ports) >= 1, "Insufficient ports for 1 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test.pcap"]), ) ) @@ -241,7 +241,7 @@ class TestEventdevPipelinePerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -256,7 +256,7 @@ class TestEventdevPipelinePerf(TestCase): self.eventdev_device_bus_id, self.Port_pci_ids[0], ) - self.dut.send_expect(command_line, "eventdev port 0", 100) + self.sut_node.send_expect(command_line, "eventdev port 0", 100) info = "Executing Eventdev_pipeline using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -270,11 +270,11 @@ class TestEventdevPipelinePerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test.pcap"]) - self.tester.scapy_append( + 
self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -283,7 +283,7 @@ class TestEventdevPipelinePerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -311,19 +311,19 @@ class TestEventdevPipelinePerf(TestCase): Evendev_Pipeline Performance Benchmarking with 1 ports. """ self.verify( - len(self.dut_ports) >= 1, "Insufficient ports for 1 ports performance test" + len(self.sut_ports) >= 1, "Insufficient ports for 1 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test.pcap"]), ) ) @@ -332,7 +332,7 @@ class TestEventdevPipelinePerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -347,7 +347,7 @@ class TestEventdevPipelinePerf(TestCase): self.eventdev_device_bus_id, self.Port_pci_ids[0], ) - self.dut.send_expect(command_line, "eventdev port 0", 100) + self.sut_node.send_expect(command_line, "eventdev port 0", 100) info = "Executing Eventdev_pipeline using %s\n" % 
test_cycle["cores"] self.logger.info(info) @@ -361,11 +361,11 @@ class TestEventdevPipelinePerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -374,7 +374,7 @@ class TestEventdevPipelinePerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -402,26 +402,26 @@ class TestEventdevPipelinePerf(TestCase): Evendev_Pipeline Performance Benchmarking with 2 ports. 
""" self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) @@ -430,7 +430,7 @@ class TestEventdevPipelinePerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -446,7 +446,7 @@ class TestEventdevPipelinePerf(TestCase): self.Port_pci_ids[0], self.Port_pci_ids[1], ) - self.dut.send_expect(command_line, "eventdev port 0", 100) + self.sut_node.send_expect(command_line, "eventdev port 0", 100) info = "Executing Eventdev_pipeline using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -460,16 +460,16 @@ class TestEventdevPipelinePerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, 
payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -478,7 +478,7 @@ class TestEventdevPipelinePerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -506,26 +506,26 @@ class TestEventdevPipelinePerf(TestCase): Evendev_Pipeline parallel schedule type Performance Benchmarking with 2 ports. """ self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) @@ -534,7 +534,7 @@ class TestEventdevPipelinePerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + 
core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -550,7 +550,7 @@ class TestEventdevPipelinePerf(TestCase): self.Port_pci_ids[0], self.Port_pci_ids[1], ) - self.dut.send_expect(command_line, "eventdev port 0", 100) + self.sut_node.send_expect(command_line, "eventdev port 0", 100) info = "Executing Eventdev_pipeline using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -564,16 +564,16 @@ class TestEventdevPipelinePerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -582,7 +582,7 @@ class TestEventdevPipelinePerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -610,26 +610,26 @@ class TestEventdevPipelinePerf(TestCase): Evendev_Pipeline Order schedule type Performance Benchmarking with 2 ports. 
""" self.verify( - len(self.dut_ports) >= 2, "Insufficient ports for 2 ports performance test" + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) @@ -638,7 +638,7 @@ class TestEventdevPipelinePerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -654,7 +654,7 @@ class TestEventdevPipelinePerf(TestCase): self.Port_pci_ids[0], self.Port_pci_ids[1], ) - self.dut.send_expect(command_line, "eventdev port 0", 100) + self.sut_node.send_expect(command_line, "eventdev port 0", 100) info = "Executing Eventdev_pipeline using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -668,16 +668,16 @@ class TestEventdevPipelinePerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, 
payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -686,7 +686,7 @@ class TestEventdevPipelinePerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -714,40 +714,40 @@ class TestEventdevPipelinePerf(TestCase): Evendev_Pipeline Performance Benchmarking with 4 ports. """ self.verify( - len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test" + len(self.sut_ports) >= 4, "Insufficient ports for 4 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + 
self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test3.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[3]), - self.tester.get_local_port(self.dut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), os.sep.join([self.output_path, "event_test4.pcap"]), ) ) @@ -756,7 +756,7 @@ class TestEventdevPipelinePerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -774,7 +774,7 @@ class TestEventdevPipelinePerf(TestCase): self.Port_pci_ids[2], self.Port_pci_ids[3], ) - self.dut.send_expect(command_line, "eventdev port 0", 100) + self.sut_node.send_expect(command_line, "eventdev port 0", 100) info = "Executing Eventdev_pipeline using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -788,26 +788,26 @@ class TestEventdevPipelinePerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test3.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:02")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test4.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 
'wrpcap("%s", [Ether(src="52:00:00:00:00:03")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -816,7 +816,7 @@ class TestEventdevPipelinePerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -844,40 +844,40 @@ class TestEventdevPipelinePerf(TestCase): Evendev_Pipeline parallel schedule type Performance Benchmarking with 4 ports. """ self.verify( - len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test" + len(self.sut_ports) >= 4, "Insufficient ports for 4 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test3.pcap"]), ) ) tgen_input.append( ( - 
self.tester.get_local_port(self.dut_ports[3]), - self.tester.get_local_port(self.dut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), os.sep.join([self.output_path, "event_test4.pcap"]), ) ) @@ -886,7 +886,7 @@ class TestEventdevPipelinePerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -904,7 +904,7 @@ class TestEventdevPipelinePerf(TestCase): self.Port_pci_ids[2], self.Port_pci_ids[3], ) - self.dut.send_expect(command_line, "eventdev port 0", 100) + self.sut_node.send_expect(command_line, "eventdev port 0", 100) info = "Executing Eventdev_pipeline using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -918,26 +918,26 @@ class TestEventdevPipelinePerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test3.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:02")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test4.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:03")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - 
self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = self.suite_measure_throughput(tgen_input, 100, 60) @@ -946,7 +946,7 @@ class TestEventdevPipelinePerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -974,40 +974,40 @@ class TestEventdevPipelinePerf(TestCase): Evendev_Pipeline Order schedule type Performance Benchmarking with 4 ports. """ self.verify( - len(self.dut_ports) >= 4, "Insufficient ports for 4 ports performance test" + len(self.sut_ports) >= 4, "Insufficient ports for 4 ports performance test" ) self.perf_results["header"] = [] self.perf_results["data"] = [] - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), os.sep.join([self.output_path, "event_test1.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), os.sep.join([self.output_path, "event_test2.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), os.sep.join([self.output_path, "event_test3.pcap"]), ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[3]), - self.tester.get_local_port(self.dut_ports[2]), + 
self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), os.sep.join([self.output_path, "event_test4.pcap"]), ) ) @@ -1016,7 +1016,7 @@ class TestEventdevPipelinePerf(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( core_config, socket=self.ports_socket, from_last=self.get_cores_from_last, @@ -1034,7 +1034,7 @@ class TestEventdevPipelinePerf(TestCase): self.Port_pci_ids[2], self.Port_pci_ids[3], ) - self.dut.send_expect(command_line, "eventdev port 0", 100) + self.sut_node.send_expect(command_line, "eventdev port 0", 100) info = "Executing Eventdev_pipeline using %s\n" % test_cycle["cores"] self.logger.info(info) @@ -1048,26 +1048,26 @@ class TestEventdevPipelinePerf(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size pcap = os.sep.join([self.output_path, "event_test1.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test2.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:01")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test3.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:02")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) pcap = os.sep.join([self.output_path, "event_test4.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:03")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator _, pps = 
self.suite_measure_throughput(tgen_input, 100, 60) @@ -1076,7 +1076,7 @@ class TestEventdevPipelinePerf(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("^C", "# ", 50) + self.sut_node.send_expect("^C", "# ", 50) sleep(5) for n in range(len(self.test_cycles)): @@ -1108,6 +1108,6 @@ class TestEventdevPipelinePerf(TestCase): """ Run after each test suite. """ - self.dut.send_expect("^C", "# ", 50) - self.dut.unbind_eventdev_port(port_to_unbind=self.eventdev_device_bus_id) - self.dut.kill_all() + self.sut_node.send_expect("^C", "# ", 50) + self.sut_node.unbind_eventdev_port(port_to_unbind=self.eventdev_device_bus_id) + self.sut_node.kill_all() diff --git a/tests/TestSuite_example_build.py b/tests/TestSuite_example_build.py index 06680eba..bce0035c 100644 --- a/tests/TestSuite_example_build.py +++ b/tests/TestSuite_example_build.py @@ -30,12 +30,12 @@ class TestExamplebuild(TestCase): """ Verify example applications compile successfully """ - out = self.dut.send_expect("ls /root/intel-cmt-cat-master/lib", "#") + out = self.sut_node.send_expect("ls /root/intel-cmt-cat-master/lib", "#") if "No such file or directory" not in out: - self.dut.send_expect( + self.sut_node.send_expect( "export PQOS_INSTALL_PATH=/root/intel-cmt-cat-master/lib", "#" ) - out = self.dut.build_dpdk_apps("./examples", "#") + out = self.sut_node.build_dpdk_apps("./examples", "#") verify_info = [ "Error", "Stop", diff --git a/tests/TestSuite_external_memory.py b/tests/TestSuite_external_memory.py index 8163a7ec..782f21d5 100644 --- a/tests/TestSuite_external_memory.py +++ b/tests/TestSuite_external_memory.py @@ -20,10 +20,10 @@ class TestExternalMemory(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.pmdout = PmdOutput(self.dut) - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.pmdout = PmdOutput(self.sut_node) + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ @@ -36,39 +36,39 @@ class TestExternalMemory(TestCase): Insmod modProbe before run test case """ if modename == "igb_uio": - self.dut.send_expect("modprobe uio", "#", 10) - out = self.dut.send_expect("lsmod | grep igb_uio", "#") + self.sut_node.send_expect("modprobe uio", "#", 10) + out = self.sut_node.send_expect("lsmod | grep igb_uio", "#") if "igb_uio" in out: - self.dut.send_expect("rmmod -f igb_uio", "#", 10) - self.dut.send_expect( + self.sut_node.send_expect("rmmod -f igb_uio", "#", 10) + self.sut_node.send_expect( "insmod ./" + self.target + "/kmod/igb_uio.ko", "#", 10 ) - out = self.dut.send_expect("lsmod | grep igb_uio", "#") + out = self.sut_node.send_expect("lsmod | grep igb_uio", "#") assert "igb_uio" in out, "Failed to insmod igb_uio" - self.dut.bind_interfaces_linux(driver="igb_uio") + self.sut_node.bind_interfaces_linux(driver="igb_uio") if modename == "vfio-pci": - self.dut.send_expect("rmmod vfio_pci", "#", 10) - self.dut.send_expect("rmmod vfio_iommu_type1", "#", 10) - self.dut.send_expect("rmmod vfio", "#", 10) - self.dut.send_expect("modprobe vfio", "#", 10) - self.dut.send_expect("modprobe vfio_pci", "#", 10) - out = self.dut.send_expect("lsmod | grep vfio_iommu_type1", "#") + self.sut_node.send_expect("rmmod vfio_pci", "#", 10) + self.sut_node.send_expect("rmmod vfio_iommu_type1", "#", 10) + self.sut_node.send_expect("rmmod vfio", "#", 10) + self.sut_node.send_expect("modprobe vfio", "#", 10) + self.sut_node.send_expect("modprobe vfio_pci", "#", 10) + out = self.sut_node.send_expect("lsmod | grep 
vfio_iommu_type1", "#") if not out: - out = self.dut.send_expect("ls /sys/module |grep vfio_pci", "#") + out = self.sut_node.send_expect("ls /sys/module |grep vfio_pci", "#") assert "vfio_pci" in out, "Failed to insmod vfio_pci" - self.dut.bind_interfaces_linux(driver="vfio-pci") + self.sut_node.bind_interfaces_linux(driver="vfio-pci") def test_IGB_UIO_xmem(self): """ Verifier IGB_UIO and anonymous memory allocation """ self.insmod_modprobe(modename="igb_uio") - self.eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") - self.dut.send_expect( + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/4C/1T") + self.sut_node.send_expect( r"./%s %s -- --mp-alloc=xmem -i" % (self.app_testpmd_path, self.eal_para), "testpmd>", 60, @@ -80,8 +80,8 @@ class TestExternalMemory(TestCase): Verifier IGB_UIO and anonymous hugepage memory allocation """ self.insmod_modprobe(modename="igb_uio") - self.eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") - self.dut.send_expect( + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/4C/1T") + self.sut_node.send_expect( r"./%s %s -- --mp-alloc=xmemhuge -i" % (self.app_testpmd_path, self.eal_para), "testpmd>", @@ -94,14 +94,14 @@ class TestExternalMemory(TestCase): Verifier VFIO_PCI and anonymous memory allocation """ self.insmod_modprobe(modename="vfio-pci") - self.dut.send_expect( + self.sut_node.send_expect( "echo 655359 > /sys/module/vfio_iommu_type1/parameters/dma_entry_limit", "#", 10, ) - self.eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") - self.dut.send_expect( + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/4C/1T") + self.sut_node.send_expect( r"./%s %s -- --mp-alloc=xmem -i" % (self.app_testpmd_path, self.eal_para), "testpmd>", 60, @@ -115,8 +115,8 @@ class TestExternalMemory(TestCase): """ self.insmod_modprobe(modename="vfio-pci") - self.eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") - self.dut.send_expect( + self.eal_para = 
self.sut_node.create_eal_parameters(cores="1S/4C/1T") + self.sut_node.send_expect( r"./%s %s -- --mp-alloc=xmemhuge -i" % (self.app_testpmd_path, self.eal_para), "testpmd>", @@ -126,39 +126,39 @@ class TestExternalMemory(TestCase): self.verifier_result() def verifier_result(self): - self.dut.send_expect("start", "testpmd>", 10) - self.pmdout.wait_link_status_up(self.dut_ports[0]) + self.sut_node.send_expect("start", "testpmd>", 10) + self.pmdout.wait_link_status_up(self.sut_ports[0]) self.scapy_send_packet(20) - out = self.dut.send_expect("stop", "testpmd>", 10) + out = self.sut_node.send_expect("stop", "testpmd>", 10) p = re.compile(r"\d+") result = p.findall(out) - amount = 20 * len(self.dut_ports) + amount = 20 * len(self.sut_ports) self.verify(str(amount) in result, "Wrong: can't get <%d> package" % amount) - self.dut.send_expect("quit", "#", 10) + self.sut_node.send_expect("quit", "#", 10) - self.dut.unbind_interfaces_linux(self.dut_ports) + self.sut_node.unbind_interfaces_linux(self.sut_ports) def scapy_send_packet(self, nu): """ Send a packet to port """ - for i in range(len(self.dut_ports)): - txport = self.tester.get_local_port(self.dut_ports[i]) - mac = self.dut.get_mac_address(self.dut_ports[i]) - txItf = self.tester.get_interface(txport) - self.tester.scapy_append( + for i in range(len(self.sut_ports)): + txport = self.tg_node.get_local_port(self.sut_ports[i]) + mac = self.sut_node.get_mac_address(self.sut_ports[i]) + txItf = self.tg_node.get_interface(txport) + self.tg_node.scapy_append( "sendp([Ether()/IP()/UDP()/Raw('X'*18)], iface=\"%s\",count=%s)" % (txItf, nu) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) pass @@ -166,5 +166,5 @@ class TestExternalMemory(TestCase): """ Run after each test suite. 
""" - self.dut.bind_interfaces_linux(driver=self.drivername) + self.sut_node.bind_interfaces_linux(driver=self.drivername) pass diff --git a/tests/TestSuite_external_mempool_handler.py b/tests/TestSuite_external_mempool_handler.py index aa22a35a..5c39f5ce 100644 --- a/tests/TestSuite_external_mempool_handler.py +++ b/tests/TestSuite_external_mempool_handler.py @@ -17,13 +17,13 @@ class TestExternalMempool(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports() + self.sut_ports = self.sut_node.get_ports() - self.verify(len(self.dut_ports) >= 2, "Not enough ports") + self.verify(len(self.sut_ports) >= 2, "Not enough ports") - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) - self.app_test_path = self.dut.apps_name["test"] + self.app_test_path = self.sut_node.apps_name["test"] def set_up(self): """ @@ -32,19 +32,19 @@ class TestExternalMempool(TestCase): pass def verify_unit_func(self, ops=""): - self.dut.send_expect( + self.sut_node.send_expect( "./%s -n 4 -c f --mbuf-pool-ops-name %s" % (self.app_test_path, ops), "R.*T.*E.*>.*>", 60, ) - out = self.dut.send_expect("mempool_autotest", "RTE>>", 120) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("mempool_autotest", "RTE>>", 120) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Mempool autotest failed") def verify_unit_perf(self): - self.dut.send_expect("./%s -n 4 -c f" % self.app_test_path, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("mempool_perf_autotest", "RTE>>", 1200) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("./%s -n 4 -c f" % self.app_test_path, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("mempool_perf_autotest", "RTE>>", 1200) + self.sut_node.send_expect("quit", "# ") # may need to compare performance self.verify("Test OK" in out, "Mempool performance autotest failed") @@ -55,15 +55,15 @@ class TestExternalMempool(TestCase): self.pmdout.execute_cmd("start") 
tgen_input = [] - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) tgen_input.append((tx_port, rx_port)) - tx_port = self.tester.get_local_port(self.dut_ports[1]) - rx_port = self.tester.get_local_port(self.dut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[1]) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) tgen_input.append((tx_port, rx_port)) - result = self.tester.check_random_pkts(tgen_input, allow_miss=False) + result = self.tg_node.check_random_pkts(tgen_input, allow_miss=False) self.pmdout.quit() @@ -108,7 +108,7 @@ class TestExternalMempool(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() pass def tear_down_all(self): diff --git a/tests/TestSuite_fips_cryptodev.py b/tests/TestSuite_fips_cryptodev.py index c84a047f..6c645224 100644 --- a/tests/TestSuite_fips_cryptodev.py +++ b/tests/TestSuite_fips_cryptodev.py @@ -11,12 +11,12 @@ from framework.test_case import TestCase class FipCryptodev(TestCase): def set_up_all(self): - out = self.dut.build_dpdk_apps("./examples/fips_validation") + out = self.sut_node.build_dpdk_apps("./examples/fips_validation") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") self.vf_driver = self.get_suite_cfg()["vf_driver"] cc.bind_qat_device(self, "vfio-pci") - self._app_path = self.dut.apps_name["fips_validation"] + self._app_path = self.sut_node.apps_name["fips_validation"] self._default_fips_opts = { "req-file": None, "rsp-file": None, @@ -31,7 +31,7 @@ class FipCryptodev(TestCase): pass def tear_down(self): - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): pass @@ -44,7 +44,7 @@ class FipCryptodev(TestCase): cmd_str = cc.get_dpdk_app_cmd_str(self._app_path, eal_opt_str, fips_opt_str) 
self.logger.info(cmd_str) try: - out = self.dut.send_expect(cmd_str, "#", 600) + out = self.sut_node.send_expect(cmd_str, "#", 600) except Exception as ex: self.logger.error(ex) raise ex @@ -57,7 +57,7 @@ class FipCryptodev(TestCase): rep_list = re.findall(r"FIPS/(.*)/req/(.*).req", out) for alog_name, file_name in rep_list: - out = self.dut.send_expect( + out = self.sut_node.send_expect( "diff %s/%s/resp/%s.rsp %s/%s/fax/%s.rsp | grep -v '#' | grep -v '\---'" % ( self.FIP_path, diff --git a/tests/TestSuite_firmware_version.py b/tests/TestSuite_firmware_version.py index a5215c40..2634061a 100644 --- a/tests/TestSuite_firmware_version.py +++ b/tests/TestSuite_firmware_version.py @@ -17,9 +17,9 @@ class TestFirmwareVersion(TestCase): """ Run at the start of each test suite. """ - self.ports = self.dut.get_ports() + self.ports = self.sut_node.get_ports() - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) def set_up(self): """ @@ -70,7 +70,7 @@ class TestFirmwareVersion(TestCase): expected_version_info = expected_version_list[self.kdriver] for port in self.ports: - out = self.dut.send_expect(f"show port info {port}", "testpmd> ") + out = self.sut_node.send_expect(f"show port info {port}", "testpmd> ") self.verify("Firmware-version:" in out, "Firmware version not detected") version_info = self.pmdout.get_firmware_version(port) @@ -147,10 +147,10 @@ class TestFirmwareVersion(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_flexible_rxd.py b/tests/TestSuite_flexible_rxd.py index 06ea96ce..90820ce8 100644 --- a/tests/TestSuite_flexible_rxd.py +++ b/tests/TestSuite_flexible_rxd.py @@ -23,41 +23,41 @@ class TestFlexibleRxd(TestCase, FlexibleRxdBase): Modify the dpdk code. 
""" cmds = [ - "cd " + self.dut.base_dir, + "cd " + self.sut_node.base_dir, "cp ./app/test-pmd/util.c .", r"""sed -i "/if dpdk_conf.has('RTE_NET_IXGBE')/i\if dpdk_conf.has('RTE_NET_ICE')\n\tdeps += 'net_ice'\nendif" app/test-pmd/meson.build""", "sed -i '/#include /a\#include ' app/test-pmd/util.c", "sed -i '/if (is_timestamp_enabled(mb))/i\ rte_net_ice_dump_proto_xtr_metadata(mb);' app/test-pmd/util.c", ] - [self.dut.send_expect(cmd, "#", 15, alt_session=True) for cmd in cmds] - self.dut.build_install_dpdk(self.dut.target) + [self.sut_node.send_expect(cmd, "#", 15, alt_session=True) for cmd in cmds] + self.sut_node.build_install_dpdk(self.sut_node.target) def restore_compilation(self): """ Resume editing operation. """ cmds = [ - "cd " + self.dut.base_dir, + "cd " + self.sut_node.base_dir, "cp ./util.c ./app/test-pmd/", "sed -i '/pmd_ice/d' app/test-pmd/meson.build", "rm -rf ./util.c", ] - [self.dut.send_expect(cmd, "#", 15, alt_session=True) for cmd in cmds] - self.dut.build_install_dpdk(self.dut.target) + [self.sut_node.send_expect(cmd, "#", 15, alt_session=True) for cmd in cmds] + self.sut_node.build_install_dpdk(self.sut_node.target) @check_supported_nic(supported_nic) def set_up_all(self): """ run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.ports_socket) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores = self.sut_node.get_core_list("1S/3C/1T", socket=self.ports_socket) self.verify(len(self.cores) >= 3, "Insufficient cpu cores for testing") self.preset_compilation() - self.pci = self.dut.ports_info[0]["pci"] - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.pci = self.sut_node.ports_info[0]["pci"] + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.init_base(self.pci, self.dst_mac, "pf") def tear_down_all(self): @@ -78,7 +78,7 @@ class TestFlexibleRxd(TestCase, FlexibleRxdBase): """ self.close_testpmd() time.sleep(2) - self.dut.kill_all() + self.sut_node.kill_all() @skip_unsupported_pkg("os default") def test_check_single_VLAN_fields_in_RXD_8021Q(self): diff --git a/tests/TestSuite_floating_veb.py b/tests/TestSuite_floating_veb.py index aabb9c27..22387b23 100644 --- a/tests/TestSuite_floating_veb.py +++ b/tests/TestSuite_floating_veb.py @@ -11,14 +11,14 @@ Test Floating VEB Features by Poll Mode Drivers. 
import re import time -from framework.dut import Dut -from framework.packet import Packet from framework.pmd_output import PmdOutput -from framework.project_dpdk import DPDKdut +from framework.project_dpdk import DPDKSut +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE +from framework.sut_node import SutNode from framework.test_case import TestCase from framework.utils import RED -from framework.virt_dut import VirtDut +from framework.virt_sut import VirtSut class TestFloatingVEBSwitching(TestCase): @@ -50,7 +50,7 @@ class TestFloatingVEBSwitching(TestCase): tx_bytes_prefix = "TX-bytes:" if dev == "first": - out = self.dut.send_expect("show port stats %d" % portid, "testpmd> ") + out = self.sut_node.send_expect("show port stats %d" % portid, "testpmd> ") elif dev == "second": out = self.session_secondary.send_expect( "show port stats %d" % portid, "testpmd> " @@ -88,19 +88,19 @@ class TestFloatingVEBSwitching(TestCase): """ Send 1 packet """ - self.dut.send_expect("start", "testpmd>") - mac = self.dut.get_mac_address(0) + self.sut_node.send_expect("start", "testpmd>") + mac = self.sut_node.get_mac_address(0) if tran_type == "vlan": - pkt = Packet(pkt_type="VLAN_UDP") - pkt.config_layer("ether", {"dst": vf_mac}) - pkt.config_layer("vlan", {"vlan": 1}) - pkt.send_pkt(self.tester, tx_port=itf) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP") + scapy_pkt_builder.config_layer("ether", {"dst": vf_mac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": 1}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=itf) time.sleep(0.5) else: - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": vf_mac}) - pkt.send_pkt(self.tester, tx_port=itf) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": vf_mac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=itf) time.sleep(0.5) # Test cases. 
@@ -119,13 +119,13 @@ class TestFloatingVEBSwitching(TestCase): ], "NIC Unsupported: " + str(self.nic), ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.session_secondary = self.dut.new_session() - self.session_third = self.dut.new_session() - self.pmdout = PmdOutput(self.dut) - self.pmdout_2 = PmdOutput(self.dut, self.session_secondary) - self.pmdout_3 = PmdOutput(self.dut, self.session_third) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.session_secondary = self.sut_node.new_session() + self.session_third = self.sut_node.new_session() + self.pmdout = PmdOutput(self.sut_node) + self.pmdout_2 = PmdOutput(self.sut_node, self.session_secondary) + self.pmdout_3 = PmdOutput(self.sut_node, self.session_third) self.setup_1pf_ddriver_1vf_env_flag = 0 self.setup_1pf_ddriver_2vf_env_flag = 0 @@ -135,25 +135,25 @@ class TestFloatingVEBSwitching(TestCase): self.vf2_mac = "00:11:22:33:44:13" self.vf3_mac = "00:11:22:33:44:14" - self.used_dut_port = self.dut_ports[0] - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.tester_itf = self.tester.get_interface(localPort) - self.pf_interface = self.dut.ports_info[self.used_dut_port]["intf"] - self.pf_mac_address = self.dut.get_mac_address(0) - self.pf_pci = self.dut.ports_info[self.used_dut_port]["pci"] - self.path = self.dut.apps_name["test-pmd"] + self.used_sut_port = self.sut_ports[0] + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_itf = self.tg_node.get_interface(localPort) + self.pf_interface = self.sut_node.ports_info[self.used_sut_port]["intf"] + self.pf_mac_address = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.used_sut_port]["pci"] + self.path = self.sut_node.apps_name["test-pmd"] - self.dut.init_reserved_core() - self.cores_vf0 = self.dut.get_reserved_core("2C", 0) - self.cores_vf1 = 
self.dut.get_reserved_core("2C", 0) - self.cores_vf2 = self.dut.get_reserved_core("2C", 0) - self.cores_vf3 = self.dut.get_reserved_core("2C", 0) + self.sut_node.init_reserved_core() + self.cores_vf0 = self.sut_node.get_reserved_core("2C", 0) + self.cores_vf1 = self.sut_node.get_reserved_core("2C", 0) + self.cores_vf2 = self.sut_node.get_reserved_core("2C", 0) + self.cores_vf3 = self.sut_node.get_reserved_core("2C", 0) def set_up(self): """ This is to clear up environment before the case run. """ - self.dut.kill_all() + self.sut_node.kill_all() def setup_env(self, driver, vf_num): """ @@ -161,8 +161,8 @@ class TestFloatingVEBSwitching(TestCase): dpdk driver, and nvfs(1,2,4)bond to dpdk driver. """ - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, vf_num, driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, vf_num, driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] try: for port in self.sriov_vfs_port: @@ -186,9 +186,9 @@ class TestFloatingVEBSwitching(TestCase): time.sleep(2) self.session_secondary.send_expect("quit", "# ") time.sleep(2) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) if vf_num == 1: self.setup_1pf_ddriver_1vf_env_flag = 0 elif vf_num == 2: @@ -210,9 +210,9 @@ class TestFloatingVEBSwitching(TestCase): ports=[self.pf_pci], port_options={self.pf_pci: "enable_floating_veb=1"}, ) - self.dut.send_expect("set fwd rxonly", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd rxonly", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.pmdout_2.start_testpmd( "Default", @@ 
-226,7 +226,7 @@ class TestFloatingVEBSwitching(TestCase): time.sleep(2) self.session_secondary.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) vf0_tx_stats = self.veb_get_pmd_stats("second", 0, "tx") pf_rx_stats = self.veb_get_pmd_stats("first", 0, "rx") @@ -237,7 +237,7 @@ class TestFloatingVEBSwitching(TestCase): ) self.session_secondary.send_expect("quit", "# ") time.sleep(2) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) # PF->VF @@ -248,8 +248,8 @@ class TestFloatingVEBSwitching(TestCase): port_options={self.pf_pci: "enable_floating_veb=1"}, param="--eth-peer=0,%s" % self.vf0_mac, ) - self.dut.send_expect("set fwd txonly", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set fwd txonly", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") self.pmdout_2.start_testpmd( "Default", prefix="test2", ports=[self.sriov_vfs_port[0].pci] @@ -261,10 +261,10 @@ class TestFloatingVEBSwitching(TestCase): self.session_secondary.send_expect("set promisc all off", "testpmd>") self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) self.session_secondary.send_expect("stop", "testpmd>", 2) vf0_rx_stats = self.veb_get_pmd_stats("second", 0, "rx") @@ -275,10 +275,10 @@ class TestFloatingVEBSwitching(TestCase): "VF0 can receive packet from PF, the floating VEB doesn't work", ) - def test_floating_VEB_inter_tester_vf(self): + def test_floating_VEB_inter_tg_vf(self): """ DPDK PF, then create 1VF, PF in the host running dpdk testpmd, - send traffic from tester to VF0. + send traffic from TG to VF0. 
In floating modeVF0 can't receive any packets; """ # outside world ->VF @@ -290,9 +290,9 @@ class TestFloatingVEBSwitching(TestCase): port_options={self.pf_pci: "enable_floating_veb=1"}, param="--eth-peer=0,%s" % self.vf0_mac, ) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("set promisc all on", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("set promisc all on", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.pmdout_2.start_testpmd( @@ -306,9 +306,9 @@ class TestFloatingVEBSwitching(TestCase): self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) - self.send_packet(self.vf0_mac, self.tester_itf) + self.send_packet(self.vf0_mac, self.tg_itf) time.sleep(2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) self.session_secondary.send_expect("stop", "testpmd>", 2) vf0_rx_stats = self.veb_get_pmd_stats("second", 0, "rx") @@ -333,7 +333,7 @@ class TestFloatingVEBSwitching(TestCase): ports=[self.pf_pci], port_options={self.pf_pci: "enable_floating_veb=1"}, ) - self.dut.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd>") time.sleep(2) # start VF0 self.pmdout_2.start_testpmd( @@ -371,15 +371,15 @@ class TestFloatingVEBSwitching(TestCase): ) # PF link down, VF0 -> VF1 - self.dut.send_expect("port stop all", "testpmd>", 10) - self.dut.send_expect("show port info 0", "Link status: down", 10) + self.sut_node.send_expect("port stop all", "testpmd>", 10) + self.sut_node.send_expect("show port info 0", "Link status: down", 10) self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) self.session_third.send_expect("start", "testpmd>") time.sleep(2) self.session_third.send_expect("stop", "testpmd>", 2) self.session_secondary.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) + 
self.sut_node.send_expect("stop", "testpmd>", 2) vf1_tx_stats_pfstop = self.veb_get_pmd_stats("third", 0, "tx") vf0_rx_stats_pfstop = self.veb_get_pmd_stats("second", 0, "rx") @@ -413,8 +413,8 @@ class TestFloatingVEBSwitching(TestCase): + '-c 0xf -n 4 --socket-mem 1024,1024 -a "%s,enable_floating_veb=1,floating_veb_list=0;2-3" --file-prefix=test1 -- -i' % self.pf_pci ) - self.dut.send_expect(cmd, "testpmd> ", 120) - self.dut.send_expect("port start all", "testpmd>") + self.sut_node.send_expect(cmd, "testpmd> ", 120) + self.sut_node.send_expect("port start all", "testpmd>") time.sleep(2) # VF1->VF0 # start VF0 @@ -443,8 +443,8 @@ class TestFloatingVEBSwitching(TestCase): self.session_third.send_expect("set promisc all off", "testpmd>") # PF link down - self.dut.send_expect("port stop all", "testpmd>", 30) - self.dut.send_expect("show port info 0", "Link status: down", 10) + self.sut_node.send_expect("port stop all", "testpmd>", 30) + self.sut_node.send_expect("show port info 0", "Link status: down", 10) self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) self.session_third.send_expect("start", "testpmd>") @@ -483,7 +483,7 @@ class TestFloatingVEBSwitching(TestCase): # VF0->VF2 # start VF0 - self.dut.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd>") self.pmdout_2.start_testpmd( self.cores_vf0, prefix="test2", @@ -504,8 +504,8 @@ class TestFloatingVEBSwitching(TestCase): self.session_third.send_expect("set promisc all off", "testpmd>") # PF link down - self.dut.send_expect("port stop all", "testpmd>", 30) - self.dut.send_expect("show port info 0", "Link status: down", 10) + self.sut_node.send_expect("port stop all", "testpmd>", 30) + self.sut_node.send_expect("show port info 0", "Link status: down", 10) self.session_third.send_expect("start", "testpmd>") time.sleep(2) self.session_secondary.send_expect("start", "testpmd>") @@ -531,7 +531,7 @@ class TestFloatingVEBSwitching(TestCase): # VF3->VF2 # 
start VF3 - self.dut.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd>") self.pmdout_2.start_testpmd( self.cores_vf3, prefix="test2", @@ -552,8 +552,8 @@ class TestFloatingVEBSwitching(TestCase): self.session_third.send_expect("set promisc all off", "testpmd>") # PF link down - self.dut.send_expect("port stop all", "testpmd>", 30) - self.dut.send_expect("show port info 0", "Link status: down", 10) + self.sut_node.send_expect("port stop all", "testpmd>", 30) + self.sut_node.send_expect("show port info 0", "Link status: down", 10) self.session_third.send_expect("start", "testpmd>") time.sleep(2) self.session_secondary.send_expect("start", "testpmd>") @@ -579,10 +579,10 @@ class TestFloatingVEBSwitching(TestCase): 1. Send traffic from VF0 to PF, then check PF will not see any traffic; 2. Send traffic from VF1 to PF, then check PF will receive all the packets. - 3. send traffic from tester to VF0, check VF0 can't receive traffic - from tester. - 4. send traffic from tester to VF1, check VF1 can receive all the - traffic from tester. + 3. send traffic from TG to VF0, check VF0 can't receive traffic + from TG. + 4. send traffic from TG to VF1, check VF1 can receive all the + traffic from TG. 5. send traffic from VF1 to VF2, check VF2 can receive all the traffic from VF1. 
""" @@ -593,10 +593,10 @@ class TestFloatingVEBSwitching(TestCase): + '-c 0xf -n 4 --socket-mem 1024,1024 -a "%s,enable_floating_veb=1,floating_veb_list=0;3" --file-prefix=test1 -- -i' % self.pf_pci ) - self.dut.send_expect(cmd, "testpmd> ", 120) - self.dut.send_expect("set fwd rxonly", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect(cmd, "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.pmdout_2.start_testpmd( "Default", @@ -610,7 +610,7 @@ class TestFloatingVEBSwitching(TestCase): time.sleep(2) self.session_secondary.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) vf0_tx_stats = self.veb_get_pmd_stats("second", 0, "tx") pf_rx_stats = self.veb_get_pmd_stats("first", 0, "rx") @@ -631,13 +631,13 @@ class TestFloatingVEBSwitching(TestCase): ) self.session_secondary.send_expect("set fwd txonly", "testpmd>") self.session_secondary.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) self.session_secondary.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) vf1_tx_stats = self.veb_get_pmd_stats("second", 0, "tx") pf_rx_stats = self.veb_get_pmd_stats("first", 0, "rx") @@ -653,10 +653,10 @@ class TestFloatingVEBSwitching(TestCase): self.session_secondary.send_expect("quit", "# ") time.sleep(2) - # tester->VF0 - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + # TG->VF0 + 
self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.pmdout_2.start_testpmd( @@ -670,10 +670,10 @@ class TestFloatingVEBSwitching(TestCase): self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) - self.send_packet(self.vf0_mac, self.tester_itf) + self.send_packet(self.vf0_mac, self.tg_itf) time.sleep(2) self.session_secondary.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) vf0_rx_stats = self.veb_get_pmd_stats("second", 0, "rx") self.verify( @@ -683,7 +683,7 @@ class TestFloatingVEBSwitching(TestCase): self.session_secondary.send_expect("quit", "# ") time.sleep(2) - # tester->VF1 + # TG->VF1 self.pmdout_2.start_testpmd( "Default", prefix="test2", ports=[self.sriov_vfs_port[1].pci] ) @@ -695,10 +695,10 @@ class TestFloatingVEBSwitching(TestCase): self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) - self.send_packet(self.vf1_mac, self.tester_itf) + self.send_packet(self.vf1_mac, self.tg_itf) time.sleep(2) self.session_secondary.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) vf1_rx_stats = self.veb_get_pmd_stats("second", 0, "rx") self.verify( @@ -769,8 +769,8 @@ class TestFloatingVEBSwitching(TestCase): self.session_third.send_expect("set fwd txonly", "testpmd>") self.session_third.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("port stop all", "testpmd>", 10) - self.dut.send_expect("show port info 0", "Link status: down", 10) + self.sut_node.send_expect("port stop all", "testpmd>", 10) + self.sut_node.send_expect("show port info 0", "Link status: down", 10) self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) self.session_third.send_expect("start", "testpmd>") @@ -802,16 +802,16 @@ class 
TestFloatingVEBSwitching(TestCase): if self.setup_1pf_ddriver_4vf_env_flag == 1: self.destroy_env(4) - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() - self.dut.close_session(self.session_secondary) - self.dut.close_session(self.session_third) - # Marvin recommended that all the dut ports should be bound to igb_uio. - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + self.sut_node.kill_all() + self.sut_node.close_session(self.session_secondary) + self.sut_node.close_session(self.session_third) + # Marvin recommended that all the SUT ports should be bound to igb_uio. + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver(driver=self.drivername) diff --git a/tests/TestSuite_flow_classify.py b/tests/TestSuite_flow_classify.py index ad0cd410..88e5e6a9 100644 --- a/tests/TestSuite_flow_classify.py +++ b/tests/TestSuite_flow_classify.py @@ -12,15 +12,15 @@ from functools import reduce from scapy.sendrecv import sendp from framework.exception import VerifyFailure -from framework.packet import Packet +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase from framework.utils import create_mask as dts_create_mask class TestFlowClassify(TestCase): - def is_existed_on_crb(self, check_path, crb="dut"): - alt_session = self.dut.alt_session if crb == "dut" else self.tester.alt_session + def is_existed_on_node(self, check_path, node="sut"): + alt_session = self.sut_node.alt_session if node == "sut" else self.tg_node.alt_session alt_session.send_expect("ls %s > /dev/null 2>&1" % check_path, "# ") cmd = "echo $?" 
output = alt_session.send_expect(cmd, "# ") @@ -28,10 +28,10 @@ class TestFlowClassify(TestCase): return ret def get_cores_mask(self, config="all"): - sockets = [self.dut.get_numa_id(index) for index in self.dut_ports] + sockets = [self.sut_node.get_numa_id(index) for index in self.sut_ports] socket_count = Counter(sockets) port_socket = list(socket_count.keys())[0] if len(socket_count) == 1 else -1 - mask = dts_create_mask(self.dut.get_core_list(config, socket=port_socket)) + mask = dts_create_mask(self.sut_node.get_core_list(config, socket=port_socket)) return mask @property @@ -48,25 +48,25 @@ class TestFlowClassify(TestCase): return output_path def get_ixia_peer_port(self): - for cnt in self.dut_ports: - if self.tester.get_local_port_type(cnt) != "ixia": + for cnt in self.sut_ports: + if self.tg_node.get_local_port_type(cnt) != "ixia": continue - tester_port = self.tester.get_local_port(cnt) - return tester_port + tg_port = self.tg_node.get_local_port(cnt) + return tg_port def d_console(self, cmds): - return self.execute_cmds(cmds, con_name="dut") + return self.execute_cmds(cmds, con_name="sut") def d_a_console(self, cmds): - return self.execute_cmds(cmds, con_name="dut_alt") + return self.execute_cmds(cmds, con_name="sut_alt") def get_console(self, name): - if name == "dut": - console = self.dut.send_expect - msg_pipe = self.dut.get_session_output - elif name == "dut_alt": - console = self.dut.alt_session.send_expect - msg_pipe = self.dut.alt_session.session.get_output_all + if name == "sut": + console = self.sut_node.send_expect + msg_pipe = self.sut_node.get_session_output + elif name == "sut_alt": + console = self.sut_node.alt_session.send_expect + msg_pipe = self.sut_node.alt_session.session.get_output_all else: msg = "not support <{}> session".format(name) raise VerifyFailure(msg) @@ -225,19 +225,19 @@ class TestFlowClassify(TestCase): savePath = os.sep.join([self.output_path, "pkt_{0}.pcap".format(stm_name)]) pkt_type = values.get("type") pkt_layers = 
values.get("pkt_layers") - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - pkt.pktgen.pkt.show() - streams.append(pkt.pktgen.pkt) + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + scapy_pkt_builder.scapy_pkt_util.pkt.show() + streams.append(scapy_pkt_builder.scapy_pkt_util.pkt) return streams def send_packet_by_scapy(self, config): tx_iface = config.get("tx_intf") cmd = "ifconfig {0} up".format(tx_iface) - self.tester.send_expect(cmd, "# ", 30) + self.tg_node.send_expect(cmd, "# ", 30) self.verify( - self.tester.is_interface_up(intf=tx_iface), + self.tg_node.is_interface_up(intf=tx_iface), "Wrong link status, should be up", ) pkts = config.get("stream") @@ -262,19 +262,19 @@ class TestFlowClassify(TestCase): def target_dir(self): """get absolute directory of target source code""" target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir @property def target_name(self): - return self.dut.target + return self.sut_node.target def prepare_binary(self, name): example_dir = "examples/" + name - out = self.dut.build_dpdk_apps("./" + example_dir) + out = self.sut_node.build_dpdk_apps("./" + example_dir) self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") binary_dir = os.path.join(self.target_dir, example_dir, "build") @@ -288,10 +288,10 @@ class TestFlowClassify(TestCase): rule_config = os.sep.join( [self.target_dir, "examples", "flow_classify", "ipv4_rules_file.txt"] ) - if not self.is_existed_on_crb(rule_config): + if not self.is_existed_on_node(rule_config): raise VerifyFailure("rules file doesn't existed") core = "1S/1C/1T" - eal_params = self.dut.create_eal_parameters() + eal_params = 
self.sut_node.create_eal_parameters() # option = r" -c {0} - n 4 --file-prefix=test {1} -- --rule_ipv4={2}".format(self.get_cores_mask(core),eal_params,rule_config) option = r" {0} -- --rule_ipv4={1}".format(eal_params, rule_config) prompt = "table_entry_delete succeeded" @@ -300,7 +300,7 @@ class TestFlowClassify(TestCase): return output def close_flow_classify(self): - output = self.dut.get_session_output() + output = self.sut_node.get_session_output() dt = datetime.now() timestamp = dt.strftime("%Y-%m-%d_%H%M%S") self.test_data = "{0}/{1}_{2}.log".format( @@ -326,7 +326,7 @@ class TestFlowClassify(TestCase): tx_port = ( self.get_ixia_peer_port() if pktgen_name == "ixia" - else self.tester.get_interface(self.tester.get_local_port(0)) + else self.tg_node.get_interface(self.tg_node.get_local_port(0)) ) # set traffic configuration ports_topo = { @@ -456,8 +456,8 @@ class TestFlowClassify(TestCase): supported_drivers = ["i40e", "ixgbe", "igc", "igb", "ice"] result = all( [ - self.dut.ports_info[index]["port"].default_driver in supported_drivers - for index in self.dut_ports + self.sut_node.ports_info[index]["port"].default_driver in supported_drivers + for index in self.sut_ports ] ) msg = "current nic is not supported" @@ -472,11 +472,11 @@ class TestFlowClassify(TestCase): Run before each test suite """ # initialize ports topology - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") # set binary process setting self.prepare_binary("flow_classify") - self.flow_classify = self.dut.apps_name["flow_classify"] + self.flow_classify = self.sut_node.apps_name["flow_classify"] self.verify_supported_nic() def set_up(self): diff --git a/tests/TestSuite_flow_classify_softnic.py b/tests/TestSuite_flow_classify_softnic.py index a3a0eed5..4a06e62f 100644 --- a/tests/TestSuite_flow_classify_softnic.py +++ 
b/tests/TestSuite_flow_classify_softnic.py @@ -20,43 +20,42 @@ from scapy.sendrecv import sendp, sniff from scapy.utils import hexstr, rdpcap, wrpcap import framework.utils as utils -from framework.crb import Crb -from framework.dut import Dut from framework.exception import VerifyFailure -from framework.packet import Packet +from framework.node import Node from framework.pmd_output import PmdOutput -from framework.project_dpdk import DPDKdut +from framework.project_dpdk import DPDKSut from framework.settings import DRIVERS, HEADER_SIZE +from framework.sut_node import SutNode from framework.test_case import TestCase -from framework.virt_dut import VirtDut +from framework.virt_sut import VirtSut class TestFlowClassifySoftnic(TestCase): - def copy_config_files_to_dut(self): + def copy_config_files_to_sut(self): """ - Copy firmware.cli from tester to DUT. + Copy firmware.cli from TG to SUT. """ file = "flow_classify_softnic.tar.gz" src_file = r"./dep/%s" % file dst1 = "/tmp" dst2 = "/root/dpdk/drivers/net/softnic" - self.dut.session.copy_file_to(src_file, dst1) - self.dut.send_expect("tar xf %s/%s -C %s" % (dst1, file, dst2), "#", 30) + self.sut_node.session.copy_file_to(src_file, dst1) + self.sut_node.send_expect("tar xf %s/%s -C %s" % (dst1, file, dst2), "#", 30) def start_testpmd(self, filename, port_num): """ Start testpmd. 
""" - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") self.set_ports(filename, port_num) - TESTPMD = self.dut.apps_name["test-pmd"] - cmd = "cat /sys/bus/pci/devices/%s/numa_node" % self.dut_p0_pci - numa_node = int(self.dut.send_expect(cmd, "# ", 60)) + TESTPMD = self.sut_node.apps_name["test-pmd"] + cmd = "cat /sys/bus/pci/devices/%s/numa_node" % self.sut_p0_pci + numa_node = int(self.sut_node.send_expect(cmd, "# ", 60)) cpu_id = numa_node if numa_node > 0 else 0 ports_info = [] for i in range(port_num): ports_info.append(i) - eal_params = self.dut.create_eal_parameters(cores=self.cores, ports=ports_info) + eal_params = self.sut_node.create_eal_parameters(cores=self.cores, ports=ports_info) VDEV = ( "--vdev 'net_softnic0,firmware=./drivers/net/softnic/flow_classify_softnic/%s,cpu_id=%s,conn_port=8086'" % (filename, cpu_id) @@ -71,13 +70,13 @@ class TestFlowClassifySoftnic(TestCase): ) else: raise Exception("The number of port is wrong!") - self.dut.send_expect(cmd, "testpmd> ", 60) + self.sut_node.send_expect(cmd, "testpmd> ", 60) def set_ports(self, filename, port_num): """ Set actual ports. 
""" - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/^link LINK/d' ./drivers/net/softnic/flow_classify_softnic/%s" % filename, "# ", @@ -85,26 +84,26 @@ class TestFlowClassifySoftnic(TestCase): ) cmd = ( "sed -i '1i\link LINK0 dev %s' ./drivers/net/softnic/flow_classify_softnic/%s" - % (self.dut_p0_pci, filename) + % (self.sut_p0_pci, filename) ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i '2i\link LINK1 dev %s' ./drivers/net/softnic/flow_classify_softnic/%s" - % (self.dut_p1_pci, filename) + % (self.sut_p1_pci, filename) ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) if port_num == 4: cmd = ( "sed -i '3i\link LINK2 dev %s' ./drivers/net/softnic/flow_classify_softnic/%s" - % (self.dut_p2_pci, filename) + % (self.sut_p2_pci, filename) ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i '4i\link LINK3 dev %s' ./drivers/net/softnic/flow_classify_softnic/%s" - % (self.dut_p3_pci, filename) + % (self.sut_p3_pci, filename) ) - self.dut.send_expect(cmd, "# ", 20) - self.dut.send_expect( + self.sut_node.send_expect(cmd, "# ", 20) + self.sut_node.send_expect( "sed -i 's/^thread 4 pipeline/thread %d pipeline/g' ./drivers/net/softnic/flow_classify_softnic/%s" % (self.port_num, filename), "# ", @@ -115,7 +114,7 @@ class TestFlowClassifySoftnic(TestCase): """ Set pipeline table. 
""" - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/^pipeline RX table match/d' ./drivers/net/softnic/flow_classify_softnic/%s" % filename, "# ", @@ -126,7 +125,7 @@ class TestFlowClassifySoftnic(TestCase): + cmd + "' ./drivers/net/softnic/flow_classify_softnic/%s" % filename ) - self.dut.send_expect(command, "# ", 20) + self.sut_node.send_expect(command, "# ", 20) def get_flow_direction_param_of_tcpdump(self): """ @@ -134,7 +133,7 @@ class TestFlowClassifySoftnic(TestCase): """ param = "" direct_param = r"(\s+)\[ (\S+) in\|out\|inout \]" - out = self.tester.send_expect("tcpdump -h", "# ", trim_whitespace=False) + out = self.tg_node.send_expect("tcpdump -h", "# ", trim_whitespace=False) for line in out.split("\n"): m = re.match(direct_param, line) if m: @@ -154,20 +153,20 @@ class TestFlowClassifySoftnic(TestCase): Starts tcpdump in the background to sniff packets that received by interface. """ command = "rm -f /tmp/tcpdump_{0}.pcap".format(interface) - self.tester.send_expect(command, "#") + self.tg_node.send_expect(command, "#") command = "tcpdump -n -e {0} -w /tmp/tcpdump_{1}.pcap -i {1} {2} 2>/tmp/tcpdump_{1}.out &".format( self.param_flow_dir, interface, filters ) - self.tester.send_expect(command, "# ") + self.tg_node.send_expect(command, "# ") def tcpdump_stop_sniff(self): """ Stops the tcpdump process running in the background. """ - self.tester.send_expect("killall tcpdump", "# ") + self.tg_node.send_expect("killall tcpdump", "# ") # For the [pid]+ Done tcpdump... message after killing the process sleep(1) - self.tester.send_expect('echo "Cleaning buffer"', "# ") + self.tg_node.send_expect('echo "Cleaning buffer"', "# ") sleep(3) def write_pcap_file(self, pcap_file, pkts): @@ -191,27 +190,27 @@ class TestFlowClassifySoftnic(TestCase): Return the sniff pkts. 
""" self.pmdout.wait_link_status_up("all") - tx_port = self.tester.get_local_port(self.dut_ports[from_port % self.port_num]) - rx_port = self.tester.get_local_port(self.dut_ports[to_port % self.port_num]) + tx_port = self.tg_node.get_local_port(self.sut_ports[from_port % self.port_num]) + rx_port = self.tg_node.get_local_port(self.sut_ports[to_port % self.port_num]) - tx_interface = self.tester.get_interface(tx_port) - rx_interface = self.tester.get_interface(rx_port) - # check tester's link status before send packet + tx_interface = self.tg_node.get_interface(tx_port) + rx_interface = self.tg_node.get_interface(rx_port) + # check TG's link status before send packet for iface in [tx_interface, rx_interface]: self.verify( - self.tester.is_interface_up(intf=iface), + self.tg_node.is_interface_up(intf=iface), "Wrong link status, should be up", ) self.tcpdump_start_sniff(rx_interface, filters) # Prepare the pkts to be sent - self.tester.scapy_foreground() - self.tester.scapy_append('pkt = rdpcap("%s")' % (pcap_file)) - self.tester.scapy_append( + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('pkt = rdpcap("%s")' % (pcap_file)) + self.tg_node.scapy_append( 'sendp(pkt, iface="%s", count=%d)' % (tx_interface, count) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() self.tcpdump_stop_sniff() @@ -221,16 +220,16 @@ class TestFlowClassifySoftnic(TestCase): """ Sent pkts that read from the pcap_file. 
""" - tx_port = self.tester.get_local_port(self.dut_ports[from_port]) - tx_interface = self.tester.get_interface(tx_port) + tx_port = self.tg_node.get_local_port(self.sut_ports[from_port]) + tx_interface = self.tg_node.get_interface(tx_port) # Prepare the pkts to be sent - self.tester.scapy_foreground() - self.tester.scapy_append('pkt = rdpcap("%s")' % (pcap_file)) - self.tester.scapy_append( + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('pkt = rdpcap("%s")' % (pcap_file)) + self.tg_node.scapy_append( 'sendp(pkt, iface="%s", count=%d)' % (tx_interface, count) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def send_and_check_packets( self, pcap_file, pkt, ltype, src_dst, addr_port, from_port, to_port @@ -272,7 +271,7 @@ class TestFlowClassifySoftnic(TestCase): rx_num = 0 tx_num = 0 for i in range(port): - stats = self.pmdout.get_pmd_stats(self.dut_ports[i]) + stats = self.pmdout.get_pmd_stats(self.sut_ports[i]) rx_num = rx_num + stats["RX-packets"] tx_num = tx_num + stats["TX-packets"] self.verify( @@ -324,7 +323,7 @@ class TestFlowClassifySoftnic(TestCase): actions = "rss queues %s end" % queue_idx if l4type == "": - self.dut.send_expect( + self.sut_node.send_expect( "flow %s %d group %d ingress pattern eth / %s proto mask %d src mask %s dst mask %s src spec %s dst spec %s / end actions %s / end" % ( operation, @@ -342,7 +341,7 @@ class TestFlowClassifySoftnic(TestCase): 60, ) else: - self.dut.send_expect( + self.sut_node.send_expect( "flow %s %d group %d ingress pattern eth / %s proto mask %d src mask %s dst mask %s src spec %s dst spec %s proto spec %d / %s src mask %d dst mask %d src spec %d dst spec %d / end actions %s / end" % ( operation, @@ -372,9 +371,9 @@ class TestFlowClassifySoftnic(TestCase): """ self.pmdout.wait_link_status_up("all") self.verify( - self.tester.is_interface_up(intf=itf), "Wrong link status, should be up" + self.tg_node.is_interface_up(intf=itf), "Wrong link status, should be up" ) - 
self.tester.scapy_foreground() + self.tg_node.scapy_foreground() if src_dst == "src": if ptype == "ipv4": var = src_addr.split(".") @@ -383,10 +382,10 @@ class TestFlowClassifySoftnic(TestCase): for i in range(32): packet = ( r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src="%s.%d", dst="%s", proto=17)/UDP(sport=100, dport=200)], iface="%s")' - % (self.dut_p0_mac, itf, ipaddr, i, dst_addr, itf) + % (self.sut_p0_mac, itf, ipaddr, i, dst_addr, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() elif ptype == "ipv6": var = src_addr.split(":") string = ":" @@ -397,10 +396,10 @@ class TestFlowClassifySoftnic(TestCase): for i in range(16): packet = ( r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src="%s:%d", dst="%s", nh=17)/UDP(sport=100, dport=200)], iface="%s")' - % (self.dut_p0_mac, itf, ipaddr, i, dst_addr, itf) + % (self.sut_p0_mac, itf, ipaddr, i, dst_addr, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() elif src_dst == "dst": if ptype == "ipv4": @@ -410,10 +409,10 @@ class TestFlowClassifySoftnic(TestCase): for i in range(32): packet = ( r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src="%s", dst="%s.%d", proto=17)/UDP(sport=100, dport=100)], iface="%s")' - % (self.dut_p0_mac, itf, src_addr, ipaddr, i, itf) + % (self.sut_p0_mac, itf, src_addr, ipaddr, i, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() elif ptype == "ipv6": var = dst_addr.split(":") string = ":" @@ -424,10 +423,10 @@ class TestFlowClassifySoftnic(TestCase): for i in range(16): packet = ( r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src="%s", dst="%s:%d", nh=17)/UDP(sport=100, dport=200)], iface="%s")' - % (self.dut_p0_mac, itf, src_addr, ipaddr, i, itf) + % (self.sut_p0_mac, itf, src_addr, ipaddr, i, 
itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() def check_packet_queue(self, queues=[], out=""): """ @@ -465,37 +464,37 @@ class TestFlowClassifySoftnic(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports() - self.port_num = len(self.dut_ports) + self.sut_ports = self.sut_node.get_ports() + self.port_num = len(self.sut_ports) self.verify( self.port_num == 2 or self.port_num == 4, "Insufficient ports for speed testing", ) - self.dut_p0_pci = self.dut.get_port_pci(self.dut_ports[0]) - self.dut_p1_pci = self.dut.get_port_pci(self.dut_ports[1]) - self.dut_p0_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.dut_p1_mac = self.dut.get_mac_address(self.dut_ports[1]) - self.pf0_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf1_interface = self.dut.ports_info[self.dut_ports[1]]["intf"] + self.sut_p0_pci = self.sut_node.get_port_pci(self.sut_ports[0]) + self.sut_p1_pci = self.sut_node.get_port_pci(self.sut_ports[1]) + self.sut_p0_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.sut_p1_mac = self.sut_node.get_mac_address(self.sut_ports[1]) + self.pf0_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf1_interface = self.sut_node.ports_info[self.sut_ports[1]]["intf"] if self.port_num == 4: - self.dut_p2_pci = self.dut.get_port_pci(self.dut_ports[2]) - self.dut_p3_pci = self.dut.get_port_pci(self.dut_ports[3]) - self.dut_p2_mac = self.dut.get_mac_address(self.dut_ports[2]) - self.dut_p3_mac = self.dut.get_mac_address(self.dut_ports[3]) - self.pf2_interface = self.dut.ports_info[self.dut_ports[2]]["intf"] - self.pf3_interface = self.dut.ports_info[self.dut_ports[3]]["intf"] + self.sut_p2_pci = self.sut_node.get_port_pci(self.sut_ports[2]) + self.sut_p3_pci = self.sut_node.get_port_pci(self.sut_ports[3]) + self.sut_p2_mac = self.sut_node.get_mac_address(self.sut_ports[2]) + self.sut_p3_mac 
= self.sut_node.get_mac_address(self.sut_ports[3]) + self.pf2_interface = self.sut_node.ports_info[self.sut_ports[2]]["intf"] + self.pf3_interface = self.sut_node.ports_info[self.sut_ports[3]]["intf"] self.ipv4_mask = "255.255.255.255" self.ipv6_mask = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" self.portmask = 65535 self.protomask = 255 - self.pmdout = PmdOutput(self.dut) - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.tester_itf = self.tester.get_interface(localPort) - self.copy_config_files_to_dut() + self.pmdout = PmdOutput(self.sut_node) + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_itf = self.tg_node.get_interface(localPort) + self.copy_config_files_to_sut() self.param_flow_dir = self.get_flow_direction_param_of_tcpdump() @@ -567,12 +566,12 @@ class TestFlowClassifySoftnic(TestCase): dportspec=200, index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="2.0.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -582,7 +581,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="2.64.0.0", proto=6) / TCP(sport=100, dport=200) / ("X" * 48) @@ -592,7 +591,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="2.128.0.0", dst="0.0.0.0", proto=132) / SCTP(sport=100, dport=200) / ("X" * 48) @@ -602,7 +601,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="4.0.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -612,7 +611,7 @@ class 
TestFlowClassifySoftnic(TestCase): # send another 3 packets pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="3.0.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -622,7 +621,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="2.64.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -632,7 +631,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="2.128.0.0", dst="0.0.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -643,13 +642,13 @@ class TestFlowClassifySoftnic(TestCase): self.check_status(7, 4, self.port_num) # query rule - out = self.dut.send_expect("flow query %d 3 queue" % self.port_num, "QUEUE", 60) + out = self.sut_node.send_expect("flow query %d 3 queue" % self.port_num, "QUEUE", 60) # destroy rule 1 - self.dut.send_expect( + self.sut_node.send_expect( "flow destroy %d rule 1" % self.port_num, "Flow rule #1 destroyed", 60 ) - destroy_out = self.dut.send_expect( + destroy_out = self.sut_node.send_expect( "flow list %d" % self.port_num, "testpmd> ", 60 ) self.verify("1" not in destroy_out, "destroy rule fail") @@ -657,7 +656,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="2.64.0.0", proto=6) / TCP(sport=100, dport=200) / ("X" * 48) @@ -671,17 +670,17 @@ class TestFlowClassifySoftnic(TestCase): self.verify("2.64.0.0" not in dst_ip_list, "rule 1 test fail") # flush rules - self.dut.send_expect("flow flush %d" % self.port_num, "testpmd> ", 60) - flush_out = self.dut.send_expect( + self.sut_node.send_expect("flow flush %d" % self.port_num, "testpmd> ", 60) + flush_out = self.sut_node.send_expect( "flow list %d" % 
self.port_num, "testpmd> ", 60 ) self.verify("Rule" not in flush_out, "flush rule fail") - self.dut.send_expect("clear port stats all", "testpmd> ", 60) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 60) # test all the rules pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="2.0.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -691,7 +690,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="2.64.0.0", proto=6) / TCP(sport=100, dport=200) / ("X" * 48) @@ -701,7 +700,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="2.128.0.0", dst="0.0.0.0", proto=132) / SCTP(sport=100, dport=200) / ("X" * 48) @@ -711,7 +710,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="4.0.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -720,7 +719,7 @@ class TestFlowClassifySoftnic(TestCase): self.send_pkts(0, pcap_file) self.check_status(4, 0, self.port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv4_5tuple_hash_table(self): """ @@ -782,12 +781,12 @@ class TestFlowClassifySoftnic(TestCase): dportspec=204, index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.1", dst="200.0.0.1", proto=6) / TCP(sport=101, dport=201) / ("X" * 48) @@ -797,7 +796,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / 
IP(src="100.0.0.2", dst="200.0.0.2", proto=17) / UDP(sport=102, dport=202) / ("X" * 48) @@ -807,7 +806,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.3", dst="200.0.0.3", proto=132) / SCTP(sport=103, dport=203) / ("X" * 48) @@ -817,14 +816,14 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.4", dst="200.0.0.4", proto=17) / UDP(sport=104, dport=204) / ("X" * 48) ] self.send_and_check_packets(pcap_file, pkt, "ipv4", "dst", "200.0.0.4", 0, 0) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv4_addr_hash_table(self): """ @@ -870,12 +869,12 @@ class TestFlowClassifySoftnic(TestCase): dportspec=200, index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.1", dst="200.0.0.1", proto=6) / TCP(sport=101, dport=201) / ("X" * 48) @@ -885,7 +884,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.2", dst="200.0.0.2", proto=17) / UDP(sport=102, dport=202) / ("X" * 48) @@ -895,7 +894,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.3", dst="200.0.0.3", proto=132) / SCTP(sport=103, dport=203) / ("X" * 48) @@ -905,12 +904,12 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.4", dst="200.0.0.4") / ("X" * 48) ] 
self.send_and_check_packets(pcap_file, pkt, "ipv4", "src", "100.0.0.4", 0, 0) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) # match ipv4 dst_addr cmd = "pipeline RX table match hash ext key 8 mask FFFFFF0000000000 offset 286 buckets 16K size 64K action AP0" @@ -953,12 +952,12 @@ class TestFlowClassifySoftnic(TestCase): l4type="", index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.1", dst="200.0.0.1", proto=6) / TCP(sport=101, dport=201) / ("X" * 48) @@ -968,7 +967,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.2", dst="200.0.1.2", proto=17) / UDP(sport=102, dport=202) / ("X" * 48) @@ -978,7 +977,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.3", dst="200.0.2.3", proto=132) / SCTP(sport=103, dport=203) / ("X" * 48) @@ -988,12 +987,12 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.4", dst="200.0.3.4") / ("X" * 48) ] self.send_and_check_packets(pcap_file, pkt, "ipv4", "dst", "200.0.3.4", 0, 0) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) # match sport cmd = "pipeline RX table match hash ext key 8 mask FFFF000000000000 offset 290 buckets 16K size 64K action AP0" @@ -1037,12 +1036,12 @@ class TestFlowClassifySoftnic(TestCase): dportspec=200, index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - 
Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.1", dst="200.0.0.1", proto=6) / TCP(sport=100, dport=201) / ("X" * 48) @@ -1052,7 +1051,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.2", dst="200.0.1.2", proto=17) / UDP(sport=101, dport=202) / ("X" * 48) @@ -1062,7 +1061,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.3", dst="200.0.2.3", proto=132) / SCTP(sport=102, dport=203) / ("X" * 48) @@ -1072,7 +1071,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.2", dst="200.0.1.2", proto=17) / UDP(sport=103, dport=202) / ("X" * 48) @@ -1082,7 +1081,7 @@ class TestFlowClassifySoftnic(TestCase): # send a packet without l4 info pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.4", dst="200.0.3.4") / ("X" * 48) ] @@ -1090,7 +1089,7 @@ class TestFlowClassifySoftnic(TestCase): self.send_pkts(0, pcap_file) self.check_status(5, 4, self.port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv6_acl_table(self): """ @@ -1134,12 +1133,12 @@ class TestFlowClassifySoftnic(TestCase): sportspec=100, index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:5789", dst="2001::2", nh=17) / UDP(sport=101, dport=201) / ("X" * 48) @@ -1157,7 +1156,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - 
Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:6789", dst="2001::2", nh=6) / TCP(sport=101, dport=201) / ("X" * 48) @@ -1175,7 +1174,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:7789", dst="2001::2", nh=132) / SCTP(sport=101, dport=201) / ("X" * 48) @@ -1193,7 +1192,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:8789", dst="2001::2", nh=17) / UDP(sport=100, dport=201) / ("X" * 48) @@ -1203,7 +1202,7 @@ class TestFlowClassifySoftnic(TestCase): # send another 3 packets pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:9789", dst="2001::2", nh=17) / UDP(sport=101, dport=201) / ("X" * 48) @@ -1213,7 +1212,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:8789", dst="2001::2", nh=17) / UDP(sport=101, dport=201) / ("X" * 48) @@ -1223,7 +1222,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:6789", dst="2001::2", nh=17) / TCP(sport=101, dport=201) / ("X" * 48) @@ -1232,7 +1231,7 @@ class TestFlowClassifySoftnic(TestCase): self.send_pkts(0, pcap_file) self.check_status(7, 4, self.port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv6_addr_hash_table(self): """ @@ -1270,12 +1269,12 @@ class TestFlowClassifySoftnic(TestCase): src_spec="ABCD:EF01:2345:6789:ABCD:EF01:2345:8789", 
index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:5789", dst="2001::2", nh=6) / TCP(sport=101, dport=201) / ("X" * 48) @@ -1293,7 +1292,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:6789", dst="2001::2", nh=6) / TCP(sport=101, dport=201) / ("X" * 48) @@ -1311,7 +1310,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:7789", dst="2001::2", nh=132) / SCTP(sport=101, dport=201) / ("X" * 48) @@ -1329,7 +1328,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:8789", dst="2001::2", nh=17) / UDP(sport=100, dport=201) / ("X" * 48) @@ -1346,7 +1345,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="ABCD:EF01:2345:6789:ABCD:EF01:2345:9789", dst="2001::2", nh=17) / UDP(sport=101, dport=201) / ("X" * 48) @@ -1354,7 +1353,7 @@ class TestFlowClassifySoftnic(TestCase): self.write_pcap_file(pcap_file, pkt) self.send_pkts(0, pcap_file) self.check_status(5, 4, self.port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) # match ipv6 dst_addr cmd = "pipeline RX table match hash ext key 16 mask FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF offset 294 buckets 16K size 64K action AP0" @@ -1386,12 +1385,12 @@ class TestFlowClassifySoftnic(TestCase): 
dst_spec="ABCD:EF01:2345:6789:ABCD:EF01:2345:8789", index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(dst="ABCD:EF01:2345:6789:ABCD:EF01:2345:5789", src="2001::2", nh=6) / TCP(sport=101, dport=201) / ("X" * 48) @@ -1409,7 +1408,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(dst="ABCD:EF01:2345:6789:ABCD:EF01:2345:6789", src="2001::2", nh=6) / TCP(sport=101, dport=201) / ("X" * 48) @@ -1427,7 +1426,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(dst="ABCD:EF01:2345:6789:ABCD:EF01:2345:7789", src="2001::2", nh=132) / SCTP(sport=101, dport=201) / ("X" * 48) @@ -1445,7 +1444,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(dst="ABCD:EF01:2345:6789:ABCD:EF01:2345:8789", src="2001::2", nh=17) / UDP(sport=100, dport=201) / ("X" * 48) @@ -1462,7 +1461,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(dst="ABCD:EF01:2345:6789:ABCD:EF01:2345:9789", src="2001::2", nh=17) / UDP(sport=101, dport=201) / ("X" * 48) @@ -1470,7 +1469,7 @@ class TestFlowClassifySoftnic(TestCase): self.write_pcap_file(pcap_file, pkt) self.send_pkts(0, pcap_file) self.check_status(5, 4, self.port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv6_5tuple_hash_table(self): """ @@ -1538,12 +1537,12 @@ class TestFlowClassifySoftnic(TestCase): dportspec=204, index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + 
self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="2001::1", dst="0::1", nh=17) / UDP(sport=101, dport=201) / ("X" * 48) @@ -1553,7 +1552,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="2001::2", dst="0::2", nh=6) / TCP(sport=102, dport=202) / ("X" * 48) @@ -1563,7 +1562,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="2001::3", dst="0::3", nh=132) / SCTP(sport=103, dport=203) / ("X" * 48) @@ -1573,7 +1572,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="2001::4", dst="0::4", nh=17) / UDP(sport=104, dport=204) / ("X" * 48) @@ -1582,7 +1581,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="2001::1", dst="0::1", nh=6) / TCP(sport=101, dport=201) / ("X" * 48) @@ -1590,7 +1589,7 @@ class TestFlowClassifySoftnic(TestCase): self.write_pcap_file(pcap_file, pkt) self.send_pkts(0, pcap_file) self.check_status(5, 4, self.port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_inconsistent_rules(self): """ @@ -1603,44 +1602,44 @@ class TestFlowClassifySoftnic(TestCase): self.start_testpmd(filename, self.port_num) # create rule - self.dut.send_expect( + self.sut_node.send_expect( "flow create 4 group 0 ingress pattern eth / ipv4 proto mask 0 src mask 0.0.0.0 dst mask 255.255.255.255 src spec 100.0.0.1 dst spec 200.0.0.1 proto spec 17 / udp src mask 0 dst mask 0 src spec 100 dst spec 200 / end actions queue index 3 / end", "error", 60, ) - 
self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) cmd = "pipeline RX table match hash ext key 8 mask FFFFFF0000000000 offset 286 buckets 16K size 64K action AP0" self.set_table(cmd, filename) self.start_testpmd(filename, self.port_num) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 4 group 0 ingress pattern eth / ipv4 proto mask 0 src mask 0.0.0.0 dst mask 255.255.255.255 src spec 100.0.0.1 dst spec 200.0.0.1 proto spec 17 / udp src mask 0 dst mask 0 src spec 100 dst spec 200 / end actions queue index 3 / end", "error", 60, ) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) # ipv6 filename = "flow_ipv6_5tuple_hash_firmware.cli" cmd = "pipeline RX table match hash ext key 64 mask 0000FF00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000000000000000000000000000000 offset 274 buckets 16K size 64K action AP0" self.set_table(cmd, filename) self.start_testpmd(filename, self.port_num) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 4 group 0 ingress pattern eth / ipv6 proto mask 255 src mask ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff dst mask ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff src spec 2001::1 dst spec 0::1 proto spec 17 / udp src mask 0 dst mask 65535 src spec 31 dst spec 41 / end actions queue index 3 / end", "error", 60, ) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) cmd = "pipeline RX table match hash ext key 16 mask FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF offset 294 buckets 16K size 64K action AP0" self.set_table(cmd, filename) self.start_testpmd(filename, self.port_num) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 4 group 0 ingress pattern eth / ipv6 proto mask 0 src mask ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff dst mask 0:0:0:0:0:0:0:0 src spec ABCD:EF01:2345:6789:ABCD:EF01:2345:5789 dst spec 0:0:0:0:0:0:0:0 proto spec 17 / udp src mask 0 dst mask 0 src spec 
0 dst spec 0 / end actions queue index 3 / end", "error", 60, ) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv4_hash_rss_action(self): """ @@ -1714,12 +1713,12 @@ class TestFlowClassifySoftnic(TestCase): action="rss", index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.1", dst="200.0.0.1", proto=6) / TCP(sport=100, dport=200) / ("X" * 48) @@ -1729,7 +1728,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.2", dst="200.0.0.2", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -1739,7 +1738,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.3", dst="200.0.0.3", proto=132) / SCTP(sport=100, dport=200) / ("X" * 48) @@ -1749,7 +1748,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.4", dst="200.0.0.4", proto=6) / TCP(sport=100, dport=200) / ("X" * 48) @@ -1759,7 +1758,7 @@ class TestFlowClassifySoftnic(TestCase): # not match test pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.4", dst="200.0.0.4", proto=6) / TCP(sport=101, dport=200) / ("X" * 48) @@ -1767,7 +1766,7 @@ class TestFlowClassifySoftnic(TestCase): self.write_pcap_file(pcap_file, pkt) self.send_pkts(0, pcap_file) self.check_status(5, 4, self.port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) # match ipv4 src_addr cmd = "pipeline RX table match hash ext key 16 mask 
00FF0000FFFFFF00FFFFFFFFFFFFFFFF offset 278 buckets 16K size 64K action AP0" @@ -1800,20 +1799,20 @@ class TestFlowClassifySoftnic(TestCase): action="rss", index=[0, 1, 2, 3], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) self.send_continuous_packet( - "ipv4", "src", "100.0.0.1", "200.0.0.1", self.tester_itf + "ipv4", "src", "100.0.0.1", "200.0.0.1", self.tg_itf ) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([0, 1, 2, 3], out) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) self.send_continuous_packet( - "ipv4", "src", "100.0.1.2", "200.0.0.1", self.tester_itf + "ipv4", "src", "100.0.1.2", "200.0.0.1", self.tg_itf ) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([0, 1, 2, 3], out) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) # match ipv4 src_addr cmd = "pipeline RX table match hash ext key 8 mask FFFF0000FFFFFFFF offset 282 buckets 16K size 64K action AP0" @@ -1850,27 +1849,27 @@ class TestFlowClassifySoftnic(TestCase): action="rss", index=[1, 2], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) self.send_continuous_packet( - "ipv4", "src", "100.0.0.1", "200.0.0.1", self.tester_itf + "ipv4", "src", "100.0.0.1", "200.0.0.1", self.tg_itf ) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([0], out) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) self.send_continuous_packet( - "ipv4", "src", "100.0.1.1", "200.0.0.2", self.tester_itf + "ipv4", "src", "100.0.1.1", "200.0.0.2", self.tg_itf ) - out = self.dut.send_expect("stop", 
"testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([2, 3], out) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) self.send_continuous_packet( - "ipv4", "src", "200.0.0.1", "200.0.0.2", self.tester_itf + "ipv4", "src", "200.0.0.1", "200.0.0.2", self.tg_itf ) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([1, 2], out) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv6_hash_rss_action(self): """ @@ -1948,12 +1947,12 @@ class TestFlowClassifySoftnic(TestCase): action="rss", index=[0], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="2001::1", dst="1001::1", nh=6) / TCP(sport=100, dport=200) / ("X" * 48) @@ -1963,7 +1962,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="2001::2", dst="1001::2", nh=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -1973,7 +1972,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="2001::3", dst="1001::3", nh=132) / SCTP(sport=100, dport=200) / ("X" * 48) @@ -1983,7 +1982,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="2001::4", dst="1001::4", nh=6) / TCP(sport=100, dport=200) / ("X" * 48) @@ -1993,7 +1992,7 @@ class TestFlowClassifySoftnic(TestCase): # not match test pcap_file = "/tmp/route_4.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / 
IPv6(src="2001::1", dst="1001::1", nh=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2001,7 +2000,7 @@ class TestFlowClassifySoftnic(TestCase): self.write_pcap_file(pcap_file, pkt) self.send_pkts(0, pcap_file) self.check_status(5, 4, self.port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) cmd = "pipeline RX table match hash ext key 64 mask 0000FF00FFFFFFFFFFFFFFFFFFFFFFFFFFFF0000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000000000000000000000000000000 offset 274 buckets 16K size 64K action AP0" self.set_table(cmd, filename) @@ -2036,28 +2035,28 @@ class TestFlowClassifySoftnic(TestCase): action="rss", index=[0, 1, 2, 3], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) self.send_continuous_packet( "ipv6", "src", "ABCD:EF01:2345:6789:ABCD:EF01:2345:0", "0::1", - self.tester_itf, + self.tg_itf, ) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([0, 1, 2, 3], out) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) self.send_continuous_packet( "ipv6", "src", "ABCD:EF01:2345:6789:ABCD:EF01:2346:0", "0::1", - self.tester_itf, + self.tg_itf, ) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([0, 1, 2, 3], out) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) cmd = "pipeline RX table match hash ext key 64 mask 00000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0000FFFFFFFF000000000000000000000000000000000000000000000000 offset 274 buckets 16K size 64K action AP0" self.set_table(cmd, filename) @@ -2103,26 +2102,26 @@ class TestFlowClassifySoftnic(TestCase): action="rss", index=[1, 2], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", 
"testpmd> ", 60) self.send_continuous_packet( - "ipv6", "dst", "2001::1", "1001::1", self.tester_itf + "ipv6", "dst", "2001::1", "1001::1", self.tg_itf ) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([0], out) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) self.send_continuous_packet( - "ipv6", "dst", "2001::2", "1001::1", self.tester_itf + "ipv6", "dst", "2001::2", "1001::1", self.tg_itf ) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([2, 3], out) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) self.send_continuous_packet( - "ipv6", "dst", "2001::1", "1002::1", self.tester_itf + "ipv6", "dst", "2001::1", "1002::1", self.tg_itf ) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) self.check_packet_queue([1, 2], out) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv4_acl_jump(self): """ @@ -2181,12 +2180,12 @@ class TestFlowClassifySoftnic(TestCase): action="jump", index=[1], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="200.0.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2196,7 +2195,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="200.64.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2204,15 +2203,15 @@ class TestFlowClassifySoftnic(TestCase): self.send_and_check_packets(pcap_file, pkt, "ipv4", "dst", 
"200.64.0.0", 0, 1) # destroy rules of group 1 - self.dut.send_expect("flow destroy 2 rule 0", "testpmd> ", 60) - self.dut.send_expect("flow destroy 2 rule 1", "testpmd> ", 60) - destroy_out = self.dut.send_expect("flow list 2", "testpmd> ", 60) + self.sut_node.send_expect("flow destroy 2 rule 0", "testpmd> ", 60) + self.sut_node.send_expect("flow destroy 2 rule 1", "testpmd> ", 60) + destroy_out = self.sut_node.send_expect("flow list 2", "testpmd> ", 60) self.verify("QUEUE" not in destroy_out, "destroy rule fail") # rule 2 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.0", dst="200.0.0.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2225,7 +2224,7 @@ class TestFlowClassifySoftnic(TestCase): dst_ip_list.append(packet.getlayer(1).dst) self.verify("200.0.0.0" not in dst_ip_list, "rule 2 test fail") - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv4_hash_jump(self): """ @@ -2284,12 +2283,12 @@ class TestFlowClassifySoftnic(TestCase): action="jump", index=[1], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.0", dst="2.20.21.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2299,7 +2298,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.1", dst="2.20.21.1", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2307,15 +2306,15 @@ class TestFlowClassifySoftnic(TestCase): self.send_and_check_packets(pcap_file, pkt, "ipv4", "dst", "2.20.21.1", 0, 1) # destroy rules of group 1 - self.dut.send_expect("flow destroy 2 rule 0", "Flow rule #0 destroyed", 60) - self.dut.send_expect("flow destroy 2 rule 1", "Flow rule #1 destroyed", 
60) - destroy_out = self.dut.send_expect("flow list 2", "testpmd> ", 60) + self.sut_node.send_expect("flow destroy 2 rule 0", "Flow rule #0 destroyed", 60) + self.sut_node.send_expect("flow destroy 2 rule 1", "Flow rule #1 destroyed", 60) + destroy_out = self.sut_node.send_expect("flow list 2", "testpmd> ", 60) self.verify("QUEUE" not in destroy_out, "destroy rule fail") # rule 2 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.1", dst="2.20.21.1", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2328,7 +2327,7 @@ class TestFlowClassifySoftnic(TestCase): dst_ip_list.append(packet.getlayer(1).dst) self.verify("2.20.21.1" not in dst_ip_list, "rule 3 test fail") - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv4_acl_hash_jump(self): """ @@ -2391,12 +2390,12 @@ class TestFlowClassifySoftnic(TestCase): action="jump", index=[1], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.0", dst="2.20.21.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2406,7 +2405,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.1", dst="2.20.21.1", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2416,7 +2415,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.0", dst="2.20.21.0", proto=17) / UDP(sport=101, dport=200) / ("X" * 48) @@ -2427,7 +2426,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.1", 
dst="2.20.21.1", proto=17) / UDP(sport=100, dport=201) / ("X" * 48) @@ -2436,7 +2435,7 @@ class TestFlowClassifySoftnic(TestCase): self.send_pkts(0, pcap_file) self.check_status(4, 2, port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv4_hash_acl_jump(self): """ @@ -2499,12 +2498,12 @@ class TestFlowClassifySoftnic(TestCase): action="jump", index=[1], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.0", dst="2.20.21.0", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2514,7 +2513,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.1", dst="2.20.21.1", proto=17) / UDP(sport=100, dport=200) / ("X" * 48) @@ -2524,7 +2523,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.0", dst="2.20.21.2", proto=17) / UDP(sport=101, dport=200) / ("X" * 48) @@ -2535,7 +2534,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="1.10.11.1", dst="2.20.21.3", proto=17) / UDP(sport=100, dport=201) / ("X" * 48) @@ -2544,7 +2543,7 @@ class TestFlowClassifySoftnic(TestCase): self.send_pkts(0, pcap_file) self.check_status(4, 2, port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv6_acl_jump(self): """ @@ -2608,12 +2607,12 @@ class TestFlowClassifySoftnic(TestCase): action="jump", index=[1], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = 
"/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::1", dst="2001::1") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2623,7 +2622,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::1", dst="2001::2") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2632,7 +2631,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::2", dst="2001::1") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2641,7 +2640,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::2", dst="2001::2") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2650,7 +2649,7 @@ class TestFlowClassifySoftnic(TestCase): self.send_pkts(0, pcap_file) self.check_status(4, 3, port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv6_hash_jump(self): """ @@ -2725,12 +2724,12 @@ class TestFlowClassifySoftnic(TestCase): action="jump", index=[1], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::1", dst="2001::1") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2740,7 +2739,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::2", dst="2001::2") / TCP(sport=100, dport=200) / ("X" * 48) @@ -2748,15 +2747,15 @@ class TestFlowClassifySoftnic(TestCase): self.send_and_check_packets(pcap_file, pkt, "ipv6", "dst", "2001::2", 0, 1) # destroy rules of group 1 - self.dut.send_expect("flow destroy 2 rule 0", 
"Flow rule #0 destroyed", 60) - self.dut.send_expect("flow destroy 2 rule 1", "Flow rule #1 destroyed", 60) - destroy_out = self.dut.send_expect("flow list 2", "testpmd> ", 60) + self.sut_node.send_expect("flow destroy 2 rule 0", "Flow rule #0 destroyed", 60) + self.sut_node.send_expect("flow destroy 2 rule 1", "Flow rule #1 destroyed", 60) + destroy_out = self.sut_node.send_expect("flow list 2", "testpmd> ", 60) self.verify("QUEUE" not in destroy_out, "destroy rule fail") # rule 2 test pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::1", dst="2001::1") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2769,7 +2768,7 @@ class TestFlowClassifySoftnic(TestCase): dst_ip_list.append(packet.getlayer(1).dst) self.verify("2001::1" not in dst_ip_list, "rule 2 test fail") - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv6_acl_hash_jump(self): """ @@ -2834,12 +2833,12 @@ class TestFlowClassifySoftnic(TestCase): action="jump", index=[1], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::1", dst="2001::1") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2849,7 +2848,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::2", dst="2001::2") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2858,7 +2857,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::3", dst="2001::1") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2868,7 +2867,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + 
Ether(dst=self.sut_p0_mac) / IPv6(src="1001::4", dst="2001::2") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2877,7 +2876,7 @@ class TestFlowClassifySoftnic(TestCase): self.send_pkts(0, pcap_file) self.check_status(4, 2, port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def test_ipv6_hash_acl_jump(self): """ @@ -2944,12 +2943,12 @@ class TestFlowClassifySoftnic(TestCase): action="jump", index=[1], ) - self.dut.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::1", dst="2001::1") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2959,7 +2958,7 @@ class TestFlowClassifySoftnic(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::2", dst="2001::2") / UDP(sport=100, dport=200) / ("X" * 48) @@ -2968,7 +2967,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::1", dst="2001::1") / UDP(sport=100, dport=201) / ("X" * 48) @@ -2978,7 +2977,7 @@ class TestFlowClassifySoftnic(TestCase): pcap_file = "/tmp/route_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IPv6(src="1001::2", dst="2001::2") / UDP(sport=100, dport=202) / ("X" * 48) @@ -2987,14 +2986,14 @@ class TestFlowClassifySoftnic(TestCase): self.send_pkts(0, pcap_file) self.check_status(4, 2, port_num) - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) def tear_down(self): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_flow_filtering.py b/tests/TestSuite_flow_filtering.py index cae2d9ac..472eaf2e 100644 --- a/tests/TestSuite_flow_filtering.py +++ b/tests/TestSuite_flow_filtering.py @@ -6,7 +6,7 @@ import os import re import time -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils from framework.settings import HEADER_SIZE from framework.test_case import TestCase @@ -18,47 +18,47 @@ class TestFlowFiltering(TestCase): Run before each test suite """ # initialize ports topology - self.dut_ports = self.dut.get_ports(self.nic) - self.dts_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.txitf = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.dts_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.txitf = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - out = self.dut.build_dpdk_apps("./examples/flow_filtering") + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + out = self.sut_node.build_dpdk_apps("./examples/flow_filtering") self.verify("Error" not in out, "Compilation failed") def set_up(self): """ Run before each test case. """ - self.eal_para = self.dut.create_eal_parameters(cores=[1]) - cmd = self.dut.apps_name["flow_filtering"] + self.eal_para - out = self.dut.send_command(cmd, timeout=15) + self.eal_para = self.sut_node.create_eal_parameters(cores=[1]) + cmd = self.sut_node.apps_name["flow_filtering"] + self.eal_para + out = self.sut_node.send_command(cmd, timeout=15) self.verify("Error" not in out, "flow launch failed") def send_packet(self, pkg): """ Send packets according to parameters. 
""" - self.pkt = packet.Packet() + self.scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() for packet_type in list(pkg.keys()): - self.pkt.append_pkt(pkg[packet_type]) - self.pkt.send_pkt(crb=self.tester, tx_port=self.txitf, count=1) + self.scapy_pkt_builder.append_pkt(pkg[packet_type]) + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=self.txitf, count=1) time.sleep(2) def check_flow_queue(self): """ - Get dut flow result + Get SUT flow result """ - result = self.dut.get_session_output(timeout=2) + result = self.sut_node.get_session_output(timeout=2) if str.upper(self.dts_mac) in result: - self.verify("queue" in result, "Dut receive flow failed!") + self.verify("queue" in result, "Sut receive flow failed!") queue_result = re.findall(r"queue=(\S+)", result) return queue_result else: - raise Exception("Dut not receive correct package!") + raise Exception("Sut not receive correct package!") def test_flow_filtering_match_rule(self): pkg = { @@ -69,7 +69,7 @@ class TestFlowFiltering(TestCase): } self.send_packet(pkg) queue_list = self.check_flow_queue() - self.verify(len(queue_list) == 2, "Dut receive flow queue error!") + self.verify(len(queue_list) == 2, "Sut receive flow queue error!") self.verify( queue_list[0] == queue_list[1] and queue_list[0] == "0x1", "Flow filter not match rule!", @@ -90,10 +90,10 @@ class TestFlowFiltering(TestCase): """ Run after each test case. """ - self.dut.send_expect("^C", "#") + self.sut_node.send_expect("^C", "#") def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_generic_flow_api.py b/tests/TestSuite_generic_flow_api.py index 1f1c5d0f..b0fd0853 100644 --- a/tests/TestSuite_generic_flow_api.py +++ b/tests/TestSuite_generic_flow_api.py @@ -17,16 +17,16 @@ import time import scapy.layers.inet from scapy.utils import rdpcap -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils -from framework.crb import Crb -from framework.dut import Dut from framework.exception import VerifyFailure +from framework.node import Node from framework.pmd_output import PmdOutput -from framework.project_dpdk import DPDKdut +from framework.project_dpdk import DPDKSut from framework.settings import DRIVERS, HEADER_SIZE +from framework.sut_node import SutNode from framework.test_case import TestCase, check_supported_nic -from framework.virt_dut import VirtDut +from framework.virt_sut import VirtSut MAX_VLAN = 4095 MAX_QUEUE = 15 @@ -59,37 +59,37 @@ class TestGeneric_flow_api(TestCase): elif self.nic in ["IGC-I225_LM"]: MAX_QUEUE = 3 # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) global valports - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] global portMask portMask = utils.create_mask(valports[:2]) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.cores = "1S/8C/1T" self.pf_cores = "1S/8C/1T" - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.tester_itf = self.tester.get_interface(localPort) - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf_mac = 
self.dut.get_mac_address(0) - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_itf = self.tg_node.get_interface(localPort) + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf_mac = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] - self.session_secondary = self.dut.new_session() - self.session_third = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() + self.session_third = self.sut_node.new_session() self.outer_mac = "00:11:22:33:44:55" self.inner_mac = "00:11:22:33:44:66" self.wrong_mac = "00:11:22:33:44:77" self.vf_flag = 0 - self.pkt_obj = packet.Packet() - self.app_path = self.dut.apps_name["test-pmd"] + self.scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() + self.app_path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ Run before each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def setup_env(self): """ @@ -99,13 +99,13 @@ class TestGeneric_flow_api(TestCase): self.vf_flag = 1 # PF is bound to igb_uio, while VF is bound to vfio-pci. 
- self.dut.send_expect("modprobe uio", "#", 70) - self.dut.send_expect("insmod ./" + self.target + "/kmod/igb_uio.ko", "#", 60) - self.dut.send_expect("modprobe vfio-pci", "#", 70) + self.sut_node.send_expect("modprobe uio", "#", 70) + self.sut_node.send_expect("insmod ./" + self.target + "/kmod/igb_uio.ko", "#", 60) + self.sut_node.send_expect("modprobe vfio-pci", "#", 70) # create two vfs - self.dut.generate_sriov_vfs_by_port(self.dut_ports[0], 2, "igb_uio") - self.sriov_vfs_port = self.dut.ports_info[self.dut_ports[0]]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.sut_ports[0], 2, "igb_uio") + self.sriov_vfs_port = self.sut_node.ports_info[self.sut_ports[0]]["vfs_port"] try: for port in self.sriov_vfs_port: port.bind_driver(driver="vfio-pci") @@ -122,11 +122,11 @@ class TestGeneric_flow_api(TestCase): time.sleep(2) self.session_secondary.send_expect("quit", "# ") time.sleep(2) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[0]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[0]) else: - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) self.vf_flag = 0 @@ -143,7 +143,7 @@ class TestGeneric_flow_api(TestCase): verify the packet to the expected queue or be dropped : check_fdir=[exist|non-exist] """ - # self.tester.scapy_execute() + # self.tg_node.scapy_execute() # time.sleep(2) verify_mac = verify_mac.upper() @@ -154,8 +154,8 @@ class TestGeneric_flow_api(TestCase): outstring_vf1 = self.session_third.send_expect("stop", "testpmd> ", 120) self.logger.info("vf0: %s" % out_vf0) self.logger.info("vf1: %s" % out_vf1) - out_pf = self.dut.get_session_output(timeout=2) - outstring_pf = self.dut.send_expect("stop", "testpmd> ", 120) + out_pf = self.sut_node.get_session_output(timeout=2) + outstring_pf = self.sut_node.send_expect("stop", "testpmd> ", 120) self.logger.info("pf: %s" % out_pf) time.sleep(2) @@ -167,7 
+167,7 @@ class TestGeneric_flow_api(TestCase): else: self.verify(verify_mac not in out_vf1, "the packet is not dropped.") else: - result_scanner = r"port\s*%s/queue\s?[0-9]+" % self.dut_ports[0] + result_scanner = r"port\s*%s/queue\s?[0-9]+" % self.sut_ports[0] scanner = re.compile(result_scanner, re.DOTALL) if pf_vf == "pf": self.verify( @@ -215,7 +215,7 @@ class TestGeneric_flow_api(TestCase): ) elif check_fdir == "non-exist": self.verify("FDIR" not in out_pf, "FDIR information should not be printed.") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") if self.vf_flag == 1: self.session_secondary.send_expect("start", "testpmd> ") @@ -255,14 +255,14 @@ class TestGeneric_flow_api(TestCase): self.pmdout.execute_cmd("set verbose 1") self.pmdout.execute_cmd("start") self.pmdout.execute_cmd("show port info all") - self.pmdout.wait_link_status_up(self.dut_ports[0]) + self.pmdout.wait_link_status_up(self.sut_ports[0]) def compare_memory_rules(self, expectedRules): """ dump all flow rules that have been created in memory and compare that total rules number with the given expected number to see if they are equal, as to get your conclusion after you have deleted any flow rule entry. 
""" - outstring = self.dut.send_expect("flow list 0", "testpmd> ") + outstring = self.sut_node.send_expect("flow list 0", "testpmd> ") result_scanner = r"\d*.*?\d*.*?\d*.*?=>*" scanner = re.compile(result_scanner, re.DOTALL) m = scanner.findall(outstring) @@ -306,10 +306,10 @@ class TestGeneric_flow_api(TestCase): # check if there are expected flow rules have been created self.compare_memory_rules(rule_num) # check if one rule destoried with success - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") self.compare_memory_rules(rule_num - 1) # check if all flow rules have been removed with success - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.compare_memory_rules(0) def flow_test_process(self, flow_process, flow_action): @@ -342,7 +342,7 @@ class TestGeneric_flow_api(TestCase): "IGB_1G-I350_COPPER", "I40E_10G-10G_BASE_T_BC", ]: - self.dut.send_expect(flow_cmd, "error") + self.sut_node.send_expect(flow_cmd, "error") elif "type is 0x8100" in flow_cmd: if self.nic in [ "I40E_10G-SFP_XL710", @@ -352,7 +352,7 @@ class TestGeneric_flow_api(TestCase): "I40E_10G-10G_BASE_T_X722", "I40E_10G-10G_BASE_T_BC", ]: - self.dut.send_expect(flow_cmd, "error") + self.sut_node.send_expect(flow_cmd, "error") # vf queue id exceeds max vf queue number. elif ( ("vf0" in flow_action["flows"]) @@ -360,9 +360,9 @@ class TestGeneric_flow_api(TestCase): or ("vf0" in flow_action["actions"]) or ("vf1" in flow_action["actions"]) ) and (("index %s" % str(MAX_VFQUEUE + 1)) in flow_cmd): - self.dut.send_expect(flow_cmd, "error") + self.sut_node.send_expect(flow_cmd, "error") else: - self.dut.send_expect(flow_cmd, "validated") + self.sut_node.send_expect(flow_cmd, "validated") elif "create" in flow_cmd: # ethertype invalid or queue id exceeds max queue number. 
if ( @@ -385,7 +385,7 @@ class TestGeneric_flow_api(TestCase): "IGB_1G-I350_COPPER", "I40E_10G-10G_BASE_T_BC", ]: - self.dut.send_expect(flow_cmd, "error") + self.sut_node.send_expect(flow_cmd, "error") elif "type is 0x8100" in flow_cmd: if self.nic in [ "I40E_10G-SFP_XL710", @@ -395,7 +395,7 @@ class TestGeneric_flow_api(TestCase): "I40E_10G-10G_BASE_T_X722", "I40E_10G-10G_BASE_T_BC", ]: - self.dut.send_expect(flow_cmd, "error") + self.sut_node.send_expect(flow_cmd, "error") # vf queue id exceeds max vf queue number. elif ( ("vf0" in flow_action["flows"]) @@ -403,9 +403,9 @@ class TestGeneric_flow_api(TestCase): or ("vf0" in flow_action["actions"]) or ("vf1" in flow_action["actions"]) ) and (("index %s" % str(MAX_VFQUEUE + 1)) in flow_cmd): - self.dut.send_expect(flow_cmd, "error") + self.sut_node.send_expect(flow_cmd, "error") else: - self.dut.send_expect(flow_cmd, "created") + self.sut_node.send_expect(flow_cmd, "created") rule_created = 1 # The rule is created successfully, so send the consistent packet. 
@@ -817,11 +817,11 @@ class TestGeneric_flow_api(TestCase): py_version = sys.version if py_version.startswith("3."): - self.pkt_obj.pktgen.pkts.clear() + self.scapy_pkt_builder.scapy_pkt_util.pkts.clear() else: - del self.pkt_obj.pktgen.pkts[:] - self.pkt_obj.append_pkt(pktstr) - self.pkt_obj.send_pkt(self.tester, tx_port=self.tester_itf, count=count) + del self.scapy_pkt_builder.scapy_pkt_util.pkts[:] + self.scapy_pkt_builder.append_pkt(pktstr) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf, count=count) def send_packet(self, itf, tran_type, enable=None): """ @@ -830,9 +830,9 @@ class TestGeneric_flow_api(TestCase): global reta_lines global name global value - self.tester.scapy_foreground() - self.dut.send_expect("start", "testpmd>") - mac = self.dut.get_mac_address(0) + self.tg_node.scapy_foreground() + self.sut_node.send_expect("start", "testpmd>") + mac = self.sut_node.get_mac_address(0) # send packet with different source and dest ip if tran_type == "l2_payload": @@ -851,14 +851,14 @@ class TestGeneric_flow_api(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/Dot1Q(id=0x8100,vlan=1)/Dot1Q(id=0x8100,vlan=2,type=0xaaaa)/Raw(load="x"*60)], iface="%s")' % (mac, itf, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) else: print("\ntran_type error!\n") - out = self.dut.get_session_output(timeout=1) - self.dut.send_expect("stop", "testpmd>") + out = self.sut_node.get_session_output(timeout=1) + self.sut_node.send_expect("stop", "testpmd>") lines = out.split("\r\n") reta_line = {} # collect the hash result and the queue id @@ -929,18 +929,18 @@ class TestGeneric_flow_api(TestCase): "%s" % self.cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) 
+ self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # validate and create the flow rules - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 / tcp flags spec 0x02 flags mask 0x02 / end actions queue index 3 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp flags spec 0x02 flags mask 0x02 / end actions queue index 3 / end", "created", ) @@ -962,17 +962,17 @@ class TestGeneric_flow_api(TestCase): ) # the ipv6 rule is conflicted with ipv4 rule. - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) # validate and create the flow rules q_idx = "2" if self.nic == "IGC-I225_LM" else "4" - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv6 / tcp flags spec 0x02 flags mask 0x02 / end actions queue index %s / end" % (q_idx), "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp flags spec 0x02 flags mask 0x02 / end actions queue index %s / end" % (q_idx), "created", @@ -1015,9 +1015,9 @@ class TestGeneric_flow_api(TestCase): "%s" % self.cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # create the flow rules basic_flow_actions = [ @@ -1124,9 +1124,9 @@ class TestGeneric_flow_api(TestCase): "%s" % self.cores, "--disable-rss --rxq=%d --txq=%d" % 
(MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # i350 and 82580 only support 2-tuple, and don't support SCTP @@ -1238,9 +1238,9 @@ class TestGeneric_flow_api(TestCase): "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=test1" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # i40e,ixgbe and igb support different packet types. 
@@ -1356,9 +1356,9 @@ class TestGeneric_flow_api(TestCase): "--rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=test1" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) basic_flow_actions = [ @@ -1389,7 +1389,7 @@ class TestGeneric_flow_api(TestCase): pktstr='Ether(src="%s",dst="%s")/IP()/TCP()' % (src_mac, dst_mac) ) - out_pf = self.dut.get_session_output(timeout=2) + out_pf = self.sut_node.get_session_output(timeout=2) if mark == 1: self.verify(mark_info in out_pf, "the packet not mark the expect index.") else: @@ -1424,21 +1424,21 @@ class TestGeneric_flow_api(TestCase): "--rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=test1" % self.pf_pci, ) - self.dut.send_expect("port config all rss all", "testpmd> ", 120) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("port config all rss all", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # only dst mac # validate - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 1 / rss / end", "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 1 / rss / end", "created", ) @@ -1454,7 +1454,7 @@ class TestGeneric_flow_api(TestCase): 
self.compare_memory_rules(1) # flush - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.sendpkt_check_result( "99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0, "ipv4-other" @@ -1464,13 +1464,13 @@ class TestGeneric_flow_api(TestCase): # only src mac # validate - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth src is 99:99:99:99:99:99 / ipv4 / end actions mark id 1 / rss / end", "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 99:99:99:99:99:99 / ipv4 / end actions mark id 1 / rss / end", "created", ) @@ -1486,7 +1486,7 @@ class TestGeneric_flow_api(TestCase): self.compare_memory_rules(1) # flush - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.sendpkt_check_result( "99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0, "ipv4-other" @@ -1496,13 +1496,13 @@ class TestGeneric_flow_api(TestCase): # dst mac and src mac # validate - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth src is 99:99:99:99:99:99 dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 1 / rss / end", "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 99:99:99:99:99:99 dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 1 / rss / end", "created", ) @@ -1524,7 +1524,7 @@ class TestGeneric_flow_api(TestCase): self.compare_memory_rules(1) # destroy - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) self.sendpkt_check_result("99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0) @@ -1556,21 +1556,21 @@ class TestGeneric_flow_api(TestCase): "--rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=test1" % self.pf_pci, ) - self.dut.send_expect("port config all rss 
all", "testpmd> ", 120) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("port config all rss all", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # only dst mac # validate - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / udp / end actions mark id 1 / rss / end", "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / udp / end actions mark id 1 / rss / end", "created", ) @@ -1586,7 +1586,7 @@ class TestGeneric_flow_api(TestCase): self.compare_memory_rules(1) # flush - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.sendpkt_check_result( "99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0, "ipv4-udp" @@ -1596,13 +1596,13 @@ class TestGeneric_flow_api(TestCase): # only src mac # validate - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth src is 99:99:99:99:99:99 / ipv4 / udp / end actions mark id 1 / rss / end", "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 99:99:99:99:99:99 / ipv4 / udp / end actions mark id 1 / rss / end", "created", ) @@ -1618,7 +1618,7 @@ class TestGeneric_flow_api(TestCase): self.compare_memory_rules(1) # flush - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.sendpkt_check_result( "99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0, "ipv4-udp" @@ -1628,13 +1628,13 @@ class TestGeneric_flow_api(TestCase): # dst mac and src mac # validate - 
self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth src is 99:99:99:99:99:99 dst is 11:11:11:11:11:11 / ipv4 / udp / end actions mark id 1 / rss / end", "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 99:99:99:99:99:99 dst is 11:11:11:11:11:11 / ipv4 / udp / end actions mark id 1 / rss / end", "created", ) @@ -1656,7 +1656,7 @@ class TestGeneric_flow_api(TestCase): self.compare_memory_rules(1) # destroy - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) self.sendpkt_check_result( "99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0, "ipv4-udp" @@ -1690,21 +1690,21 @@ class TestGeneric_flow_api(TestCase): "--rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=test1" % self.pf_pci, ) - self.dut.send_expect("port config all rss all", "testpmd> ", 120) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("port config all rss all", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # only dst mac # validate - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / tcp / end actions mark id 1 / rss / end", "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / tcp / end actions mark id 1 / rss / end", "created", ) @@ -1720,7 +1720,7 @@ class TestGeneric_flow_api(TestCase): self.compare_memory_rules(1) # flush - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", 
"testpmd> ") self.sendpkt_check_result( "99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0, "ipv4-tcp" @@ -1730,13 +1730,13 @@ class TestGeneric_flow_api(TestCase): # only src mac # validate - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth src is 99:99:99:99:99:99 / ipv4 / tcp / end actions mark id 1 / rss / end", "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 99:99:99:99:99:99 / ipv4 / tcp / end actions mark id 1 / rss / end", "created", ) @@ -1752,7 +1752,7 @@ class TestGeneric_flow_api(TestCase): self.compare_memory_rules(1) # flush - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.sendpkt_check_result( "99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0, "ipv4-tcp" @@ -1762,13 +1762,13 @@ class TestGeneric_flow_api(TestCase): # dst mac and src mac # validate - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth src is 99:99:99:99:99:99 dst is 11:11:11:11:11:11 / ipv4 / tcp / end actions mark id 1 / rss / end", "validated", ) # create - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 99:99:99:99:99:99 dst is 11:11:11:11:11:11 / ipv4 / tcp / end actions mark id 1 / rss / end", "created", ) @@ -1790,7 +1790,7 @@ class TestGeneric_flow_api(TestCase): self.compare_memory_rules(1) # destroy - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) self.sendpkt_check_result( "99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0, "ipv4-tcp" @@ -1824,22 +1824,22 @@ class TestGeneric_flow_api(TestCase): "--rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=test1" % self.pf_pci, ) - self.dut.send_expect("port config all rss all", "testpmd> ", 120) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - 
self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("port config all rss all", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # delete the first rule of three rules - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 1 / rss / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 22:22:22:22:22:22 / ipv4 / end actions mark id 2 / rss / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 33:33:33:33:33:33 / ipv4 / end actions mark id 3 / rss / end", "created", ) @@ -1856,7 +1856,7 @@ class TestGeneric_flow_api(TestCase): "99:99:99:99:99:99", "33:33:33:33:33:33", 1, 3, 1, "ipv4-other" ) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ", 120) self.compare_memory_rules(2) @@ -1870,7 +1870,7 @@ class TestGeneric_flow_api(TestCase): "99:99:99:99:99:99", "33:33:33:33:33:33", 1, 3, 1, "ipv4-other" ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.sendpkt_check_result( "99:99:99:99:99:99", "11:11:11:11:11:11", 0, 1, 0, "ipv4-other" @@ -1883,20 +1883,20 @@ class TestGeneric_flow_api(TestCase): ) # delete the second rule of three rules - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 1 / rss / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 22:22:22:22:22:22 / ipv4 / end actions mark id 2 / rss / end", "created", ) - 
self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 33:33:33:33:33:33 / ipv4 / end actions mark id 3 / rss / end", "created", ) - self.dut.send_expect("flow destroy 0 rule 1", "testpmd> ", 120) + self.sut_node.send_expect("flow destroy 0 rule 1", "testpmd> ", 120) self.compare_memory_rules(2) @@ -1910,23 +1910,23 @@ class TestGeneric_flow_api(TestCase): "99:99:99:99:99:99", "33:33:33:33:33:33", 1, 3, 1, "ipv4-other" ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") # delete the third rule of three rules - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 1 / rss / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 22:22:22:22:22:22 / ipv4 / end actions mark id 2 / rss / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 33:33:33:33:33:33 / ipv4 / end actions mark id 3 / rss / end", "created", ) - self.dut.send_expect("flow destroy 0 rule 2", "testpmd> ", 120) + self.sut_node.send_expect("flow destroy 0 rule 2", "testpmd> ", 120) self.compare_memory_rules(2) @@ -1940,7 +1940,7 @@ class TestGeneric_flow_api(TestCase): "99:99:99:99:99:99", "33:33:33:33:33:33", 0, 3, 0, "ipv4-other" ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") def test_fdir_L2_mac_filter_negative(self): """ @@ -1968,68 +1968,68 @@ class TestGeneric_flow_api(TestCase): "--rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=test1" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set 
verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # ip in command - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 dst is 1.1.1.1 / end actions mark id 2 / rss / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 dst is 1.1.1.1 / end actions mark id 2 / rss / end", "error", ) # udp in command - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / udp dst is 111 / end actions mark id 2 / rss / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / udp dst is 111 / end actions mark id 2 / rss / end", "error", ) # tcp in command - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / tcp dst is 111 / end actions mark id 2 / rss / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / tcp dst is 111 / end actions mark id 2 / rss / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 3 / rss / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 99:99:99:99:99:99 / ipv4 / end actions mark id 1 / rss / end", "Invalid", ) - self.dut.send_expect("flow flush 0", "testpmd> ", 120) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 120) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 99:99:99:99:99:99 / ipv4 / end actions mark id 1 / rss / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 
99:99:99:99:99:99 dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 1 / rss / end", "Invalid", ) - self.dut.send_expect("flow flush 0", "testpmd> ", 120) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 120) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth src is 99:99:99:99:99:99 dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 1 / rss / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth dst is 11:11:11:11:11:11 / ipv4 / end actions mark id 3 / rss / end", "Invalid", ) @@ -2061,9 +2061,9 @@ class TestGeneric_flow_api(TestCase): "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # start testpmd on vf0 self.session_secondary.send_expect( @@ -2407,9 +2407,9 @@ class TestGeneric_flow_api(TestCase): "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # start testpmd on vf0 self.session_secondary.send_expect( @@ -2666,9 +2666,9 @@ class TestGeneric_flow_api(TestCase): "--pkt-filter-mode=perfect --disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - 
self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) if self.nic in ["IXGBE_10G-X550EM_X_10G_T", "IXGBE_10G-X550T"]: @@ -2829,9 +2829,9 @@ class TestGeneric_flow_api(TestCase): "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) self.session_secondary.send_expect( "%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" @@ -3102,9 +3102,9 @@ class TestGeneric_flow_api(TestCase): "--pkt-filter-mode=signature --disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) if self.nic in ["IXGBE_10G-82599_SFP"]: # create the flow rules @@ -3229,11 +3229,11 @@ class TestGeneric_flow_api(TestCase): }, ] extrapkt_rulenum = self.all_flows_process(basic_flow_actions) - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern fuzzy thresh spec 2 thresh last 5 thresh mask 0xffffffff / ipv6 src is 2001::1 dst is 2001::2 / udp src is 22 dst is 23 / end actions queue index 1 / end", "validated", ) - 
self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern fuzzy thresh spec 2 thresh last 5 thresh mask 0xffffffff / ipv6 src is 2001::1 dst is 2001::2 / udp src is 22 dst is 23 / end actions queue index 1 / end", "created", ) @@ -3396,11 +3396,11 @@ class TestGeneric_flow_api(TestCase): }, ] extrapkt_rulenum = self.all_flows_process(basic_flow_actions) - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern fuzzy thresh spec 2 thresh last 5 thresh mask 0xffffffff / ipv6 src is 2001::1 dst is 2001::2 / udp src is 22 dst is 23 / end actions queue index 1 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern fuzzy thresh spec 2 thresh last 5 thresh mask 0xffffffff / ipv6 src is 2001::1 dst is 2001::2 / udp src is 22 dst is 23 / end actions queue index 1 / end", "created", ) @@ -3469,96 +3469,96 @@ class TestGeneric_flow_api(TestCase): "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=pf" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # creat the flow rules # l2-payload exceeds the max length of raw match is 16bytes - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth type is 0x0807 / raw relative is 1 pattern is abcdefghijklmnopq / end actions queue index 1 / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth type is 0x0807 / raw relative is 1 pattern is abcdefghijklmnopq / end actions queue index 1 / end", "Exceeds maximal payload limit", ) # l2-payload equal the max length of raw match is 16bytes - 
self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth type is 0x0807 / raw relative is 1 pattern is abcdefghijklmnop / end actions queue index 1 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth type is 0x0807 / raw relative is 1 pattern is abcdefghijklmnop / end actions queue index 1 / end", "created", ) # ipv4-other the most 3 fields can be matched, and the max sum bytes of the three fields is 16 bytes. - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / vlan tci is 4095 / ipv4 proto is 255 ttl is 40 / raw relative is 1 offset is 2 pattern is ab / raw relative is 1 offset is 10 pattern is abcdefghij / raw relative is 1 offset is 0 pattern is abcd / end actions queue index 2 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / vlan tci is 4095 / ipv4 proto is 255 ttl is 40 / raw relative is 1 offset is 2 pattern is ab / raw relative is 1 offset is 10 pattern is abcdefghij / raw relative is 1 offset is 0 pattern is abcd / end actions queue index 2 / end", "created", ) # ipv4-udp - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 src is 2.2.2.4 dst is 2.2.2.5 / udp src is 22 dst is 23 / raw relative is 1 offset is 2 pattern is fhds / end actions queue index 3 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 2.2.2.4 dst is 2.2.2.5 / udp src is 22 dst is 23 / raw relative is 1 offset is 2 pattern is fhds / end actions queue index 3 / end", "created", ) # ipv4-tcp - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 src is 2.2.2.4 dst is 2.2.2.5 tos is 4 ttl is 3 / tcp src is 32 dst is 33 / raw relative is 1 offset is 2 pattern is hijk / end actions queue index 4 / end", "validated", ) - self.dut.send_expect( + 
self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 2.2.2.4 dst is 2.2.2.5 tos is 4 ttl is 3 / tcp src is 32 dst is 33 / raw relative is 1 offset is 2 pattern is hijk / end actions queue index 4 / end", "created", ) # ipv4-sctp - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 src is 2.2.2.4 dst is 2.2.2.5 / sctp src is 42 / raw relative is 1 offset is 2 pattern is abcd / end actions queue index 5 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 2.2.2.4 dst is 2.2.2.5 / sctp src is 42 / raw relative is 1 offset is 2 pattern is abcd / end actions queue index 5 / end", "created", ) # flush all the rules, then re-create the rules, fix DPDK-23826 - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") # l2-payload equal the max length of raw match is 16bytes - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth type is 0x0807 / raw relative is 1 pattern is abcdefghijklmnop / end actions queue index 1 / end", "created", ) # ipv4-other the most 3 fields can be matched, and the max sum bytes of the three fields is 16 bytes. 
- self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / vlan tci is 4095 / ipv4 proto is 255 ttl is 40 / raw relative is 1 offset is 2 pattern is ab / raw relative is 1 offset is 10 pattern is abcdefghij / raw relative is 1 offset is 0 pattern is abcd / end actions queue index 2 / end", "created", ) # ipv4-udp - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 2.2.2.4 dst is 2.2.2.5 / udp src is 22 dst is 23 / raw relative is 1 offset is 2 pattern is fhds / end actions queue index 3 / end", "created", ) # ipv4-tcp - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 2.2.2.4 dst is 2.2.2.5 tos is 4 ttl is 3 / tcp src is 32 dst is 33 / raw relative is 1 offset is 2 pattern is hijk / end actions queue index 4 / end", "created", ) # ipv4-sctp - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 2.2.2.4 dst is 2.2.2.5 / sctp src is 42 / raw relative is 1 offset is 2 pattern is abcd / end actions queue index 5 / end", "created", ) @@ -3612,7 +3612,7 @@ class TestGeneric_flow_api(TestCase): self.verify_rulenum(5) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) self.pmdout.start_testpmd( @@ -3620,18 +3620,18 @@ class TestGeneric_flow_api(TestCase): "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=pf --socket-mem 1024,1024" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # ipv6-tcp - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / vlan tci is 1 
/ ipv6 src is 2001::1 dst is 2001::2 tc is 3 hop is 30 / tcp src is 32 dst is 33 / raw relative is 1 offset is 0 pattern is hijk / raw relative is 1 offset is 8 pattern is abcdefgh / end actions queue index 6 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / vlan tci is 1 / ipv6 src is 2001::1 dst is 2001::2 tc is 3 hop is 30 / tcp src is 32 dst is 33 / raw relative is 1 offset is 0 pattern is hijk / raw relative is 1 offset is 8 pattern is abcdefgh / end actions queue index 6 / end", "created", ) @@ -3646,8 +3646,8 @@ class TestGeneric_flow_api(TestCase): ) # destroy the rule, then re-create the rule, fix DPDK-23826 - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / vlan tci is 1 / ipv6 src is 2001::1 dst is 2001::2 tc is 3 hop is 30 / tcp src is 32 dst is 33 / raw relative is 1 offset is 0 pattern is hijk / raw relative is 1 offset is 8 pattern is abcdefgh / end actions queue index 6 / end", "created", ) @@ -3666,17 +3666,17 @@ class TestGeneric_flow_api(TestCase): "--pkt-filter-mode=perfect --disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # ipv4-udp-flexbytes - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 src is 192.168.0.1 dst is 192.168.0.2 / udp src is 24 dst is 25 / raw relative is 0 search is 0 offset is 44 limit is 0 pattern is 86 / end actions queue index 1 / end", "validated", ) - self.dut.send_expect( + 
self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 dst is 192.168.0.2 / udp src is 24 dst is 25 / raw relative is 0 search is 0 offset is 44 limit is 0 pattern is 86 / end actions queue index 1 / end", "created", ) @@ -3690,7 +3690,7 @@ class TestGeneric_flow_api(TestCase): "pf", expect_rxpkts="1", expect_queue="1", verify_mac=self.outer_mac ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) # the second flexbytes rule should be created after the testpmd reset, because the flexbytes rule is global bit masks @@ -3699,23 +3699,23 @@ class TestGeneric_flow_api(TestCase): "--pkt-filter-mode=perfect --disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # ipv4-tcp-flexbytes spec-mask - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 src is 192.168.0.3 dst is 192.168.0.4 / tcp src is 22 dst is 23 / raw relative spec 0 relative mask 1 search spec 0 search mask 1 offset spec 54 offset mask 0xffffffff limit spec 0 limit mask 0xffff pattern is ab pattern is cd / end actions queue index 2 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.3 dst is 192.168.0.4 / tcp src is 22 dst is 23 / raw relative spec 0 relative mask 1 search spec 0 search mask 1 offset spec 54 offset mask 0xffffffff limit spec 0 limit mask 0xffff pattern is ab pattern is cd / end actions queue index 2 / end", "created", ) # destroy the rule, then re-create the rule, fix DPDK-23826 - self.dut.send_expect("flow destroy 0 rule 0", 
"testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.3 dst is 192.168.0.4 / tcp src is 22 dst is 23 / raw relative spec 0 relative mask 1 search spec 0 search mask 1 offset spec 54 offset mask 0xffffffff limit spec 0 limit mask 0xffff pattern is ab pattern is cd / end actions queue index 2 / end", "created", ) @@ -3736,7 +3736,7 @@ class TestGeneric_flow_api(TestCase): "pf", expect_rxpkts="1", expect_queue="2", verify_mac=self.outer_mac ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) # signature mode @@ -3745,27 +3745,27 @@ class TestGeneric_flow_api(TestCase): "--pkt-filter-mode=signature --disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # ipv4-sctp-flexbytes if self.nic in ["IXGBE_10G-X550EM_X_10G_T", "IXGBE_10G-X550T"]: - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern fuzzy thresh is 6 / eth / ipv4 src is 192.168.0.1 dst is 192.168.0.2 / sctp src is 24 dst is 25 / raw relative is 0 search is 0 offset is 48 limit is 0 pattern is ab / end actions queue index 3 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern fuzzy thresh is 6 / eth / ipv4 src is 192.168.0.1 dst is 192.168.0.2 / sctp src is 24 dst is 25 / raw relative is 0 search is 0 offset is 48 limit is 0 pattern is ab / end actions queue index 3 / end", "created", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern fuzzy 
thresh is 6 / eth / ipv4 src is 192.168.0.1 dst is 192.168.0.2 / sctp / raw relative is 0 search is 0 offset is 48 limit is 0 pattern is ab / end actions queue index 3 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern fuzzy thresh is 6 / eth / ipv4 src is 192.168.0.1 dst is 192.168.0.2 / sctp / raw relative is 0 search is 0 offset is 48 limit is 0 pattern is ab / end actions queue index 3 / end", "created", ) @@ -3788,7 +3788,7 @@ class TestGeneric_flow_api(TestCase): # ipv6-other-flexbytes if self.nic in ["IXGBE_10G-82599_SFP"]: - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) self.pmdout.start_testpmd( @@ -3796,16 +3796,16 @@ class TestGeneric_flow_api(TestCase): "--pkt-filter-mode=signature --disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern fuzzy thresh is 6 / ipv6 src is 2001::1 dst is 2001::2 / raw relative is 0 search is 0 offset is 56 limit is 0 pattern is 86 / end actions queue index 4 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern fuzzy thresh is 6 / ipv6 src is 2001::1 dst is 2001::2 / raw relative is 0 search is 0 offset is 56 limit is 0 pattern is 86 / end actions queue index 4 / end", "created", ) @@ -3837,63 +3837,63 @@ class TestGeneric_flow_api(TestCase): "%s" % self.pf_cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set 
verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # create the flow rules # l2_payload - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 14 pattern is fhds / end actions queue index 1 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 14 pattern is fhds / end actions queue index 1 / end", "created", ) # ipv4 packet - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 34 pattern is ab / end actions queue index 2 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 34 pattern is ab / end actions queue index 2 / end", "created", ) # ipv6 packet - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 58 pattern is efgh / end actions queue index 3 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 58 pattern is efgh / end actions queue index 3 / end", "created", ) # 3 fields relative is 0 - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 38 pattern is ab / raw relative is 0 offset is 34 pattern is cd / raw relative is 0 offset is 42 pattern is efgh / end actions queue index 4 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 38 pattern is ab / raw relative is 0 offset is 34 pattern is cd / raw relative is 0 offset is 42 pattern is efgh / end actions queue index 4 / end", 
"created", ) # 4 fields relative is 0 and 1 - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 48 pattern is ab / raw relative is 1 offset is 0 pattern is cd / raw relative is 0 offset is 44 pattern is efgh / raw relative is 1 offset is 10 pattern is hijklmnopq / end actions queue index 5 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 48 pattern is ab / raw relative is 1 offset is 0 pattern is cd / raw relative is 0 offset is 44 pattern is efgh / raw relative is 1 offset is 10 pattern is hijklmnopq / end actions queue index 5 / end", "created", ) # 3 fields offset confilict - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 64 pattern is ab / raw relative is 1 offset is 4 pattern is cdefgh / raw relative is 0 offset is 68 pattern is klmn / end actions queue index 6 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 64 pattern is ab / raw relative is 1 offset is 4 pattern is cdefgh / raw relative is 0 offset is 68 pattern is klmn / end actions queue index 6 / end", "created", ) @@ -3957,47 +3957,47 @@ class TestGeneric_flow_api(TestCase): "pf", expect_rxpkts="1", expect_queue="0", verify_mac=self.outer_mac ) - self.dut.send_expect("flow flush 0", "testpmd> ", 120) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 120) # 1 field 128bytes - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 128 pattern is ab / end actions queue index 1 / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 128 pattern is ab / end actions queue index 1 / end", "Failed to create flow", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow 
validate 0 ingress pattern raw relative is 0 offset is 126 pattern is abcd / end actions queue index 1 / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 126 pattern is abcd / end actions queue index 1 / end", "Failed to create flow", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 126 pattern is ab / end actions queue index 1 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 126 pattern is ab / end actions queue index 1 / end", "created", ) # 2 field 128bytes - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 68 pattern is ab / raw relative is 1 offset is 58 pattern is cd / end actions queue index 2 / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 68 pattern is ab / raw relative is 1 offset is 58 pattern is cd / end actions queue index 2 / end", "Failed to create flow", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern raw relative is 0 offset is 68 pattern is ab / raw relative is 1 offset is 56 pattern is cd / end actions queue index 2 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern raw relative is 0 offset is 68 pattern is ab / raw relative is 1 offset is 56 pattern is cd / end actions queue index 2 / end", "created", ) @@ -4058,11 +4058,11 @@ class TestGeneric_flow_api(TestCase): "--pkt-filter-mode=perfect-mac-vlan --disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("vlan set strip off 0", "testpmd> ", 120) - self.dut.send_expect("vlan set 
filter off 0", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("vlan set strip off 0", "testpmd> ", 120) + self.sut_node.send_expect("vlan set filter off 0", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # create the flow rules @@ -4127,9 +4127,9 @@ class TestGeneric_flow_api(TestCase): "--pkt-filter-mode=perfect-tunnel --disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # create the flow rules @@ -4173,9 +4173,9 @@ class TestGeneric_flow_api(TestCase): "--pkt-filter-mode=perfect-tunnel --disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # create the flow rules @@ -4232,10 +4232,10 @@ class TestGeneric_flow_api(TestCase): "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci, ) - self.dut.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 120) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + 
self.sut_node.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) self.session_secondary.send_expect( "%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" @@ -4403,9 +4403,9 @@ class TestGeneric_flow_api(TestCase): "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), "-a %s --file-prefix=pf --socket-mem 1024,1024 --legacy-mem" % self.pf_pci, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) self.session_secondary.send_expect( "%s -c 0x1e0000 -n 4 --socket-mem 1024,1024 --legacy-mem -a %s --file-prefix=vf1 -- -i --rxq=4 --txq=4 --disable-rss" @@ -4552,22 +4552,22 @@ class TestGeneric_flow_api(TestCase): "%s" % self.cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 20) - self.dut.send_expect("set verbose 1", "testpmd> ", 20) - self.dut.send_expect("start", "testpmd> ", 20) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 20) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 20) + self.sut_node.send_expect("start", "testpmd> ", 20) time.sleep(2) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp src is 32 / end actions queue index 2 / end", "created", ) - out = self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") + out = self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") p = re.compile(r"Flow rule #(\d+) destroyed") m 
= p.search(out) self.verify(m, "flow rule 0 delete failed") - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp src is 32 / end actions queue index 2 / end", "created", ) @@ -4591,22 +4591,22 @@ class TestGeneric_flow_api(TestCase): "%s" % self.cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 20) - self.dut.send_expect("set verbose 1", "testpmd> ", 20) - self.dut.send_expect("start", "testpmd> ", 20) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 20) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 20) + self.sut_node.send_expect("start", "testpmd> ", 20) time.sleep(2) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp src is 32 / end actions queue index 2 / end", "created", ) - out = self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") + out = self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") p = re.compile(r"Flow rule #(\d+) destroyed") m = p.search(out) self.verify(m, "flow rule 0 delete failed") - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp dst is 32 / end actions queue index 2 / end", "created", ) @@ -4646,33 +4646,33 @@ class TestGeneric_flow_api(TestCase): "Default", " --portmask=0x1 --rxq=%d --txq=%d" % (queue, queue) ) - self.dut.send_expect("set verbose 8", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 8", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("vlan set extend on 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("vlan set extend on 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / end actions rss types 
l2-payload end queues end func toeplitz / end", "testpmd> ", ) - self.dut.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") res = self.pmdout.wait_link_status_up("all") self.verify(res is True, "link is down") - self.send_packet(self.tester_itf, "l2_payload") + self.send_packet(self.tg_itf, "l2_payload") - # set flow rss type s-vlan c-vlan set by testpmd on dut - self.dut.send_expect( + # set flow rss type s-vlan c-vlan set by testpmd on SUT + self.sut_node.send_expect( "flow create 0 ingress pattern eth / end actions rss types s-vlan c-vlan end key_len 0 queues end / end", "testpmd> ", ) - self.send_packet(self.tester_itf, "l2_payload") + self.send_packet(self.tg_itf, "l2_payload") - self.send_packet(self.tester_itf, "l2_payload", enable="ovlan") + self.send_packet(self.tg_itf, "l2_payload", enable="ovlan") - self.send_packet(self.tester_itf, "l2_payload", enable="ivlan") + self.send_packet(self.tg_itf, "l2_payload", enable="ivlan") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4724,32 +4724,32 @@ class TestGeneric_flow_api(TestCase): "%s" % self.cores, "--disable-rss --rxq=%d --txq=%d" % (MAX_QUEUE + 1, MAX_QUEUE + 1), ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 / tcp flags spec 0x02 flags mask 0x02 / end actions queue index 1 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth type is 0x0806 / end actions queue index 2 / end", 
"validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 dst is 2.2.2.5 src is 2.2.2.4 proto is 17 / udp dst is 1 src is 1 / end actions queue index 3 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp flags spec 0x02 flags mask 0x02 / end actions queue index 1 / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth type is 0x0806 / end actions queue index 2 / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 dst is 2.2.2.5 src is 2.2.2.4 proto is 17 / udp dst is 1 src is 1 / end actions queue index 3 / end", "create", ) @@ -4777,7 +4777,7 @@ class TestGeneric_flow_api(TestCase): "pf", expect_rxpkts="1", expect_queue="3", verify_mac=self.pf_mac ) # destroy rule 2 - out = self.dut.send_expect("flow destroy 0 rule 2", "testpmd> ") + out = self.sut_node.send_expect("flow destroy 0 rule 2", "testpmd> ") p = re.compile(r"Flow rule #(\d+) destroyed") m = p.search(out) self.verify(m, "flow rule 2 delete failed") @@ -4804,7 +4804,7 @@ class TestGeneric_flow_api(TestCase): "pf", expect_rxpkts="1", expect_queue="0", verify_mac=self.pf_mac ) # destroy rule 1 - out = self.dut.send_expect("flow destroy 0 rule 1", "testpmd> ") + out = self.sut_node.send_expect("flow destroy 0 rule 1", "testpmd> ") p = re.compile(r"Flow rule #(\d+) destroyed") m = p.search(out) self.verify(m, "flow rule 1 delete failed") @@ -4824,7 +4824,7 @@ class TestGeneric_flow_api(TestCase): "pf", expect_rxpkts="1", expect_queue="1", verify_mac=self.pf_mac ) # destroy rule 0 - out = self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") + out = self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") p = re.compile(r"Flow rule #(\d+) destroyed") m = p.search(out) self.verify(m, "flow rule 0 delete failed") @@ -4836,7 +4836,7 @@ class 
TestGeneric_flow_api(TestCase): self.verify_result( "pf", expect_rxpkts="1", expect_queue="0", verify_mac=self.pf_mac ) - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") def test_jumbo_frame_size(self): @@ -4866,23 +4866,23 @@ class TestGeneric_flow_api(TestCase): "--disable-rss --rxq=4 --txq=4 --portmask=%s --nb-cores=4 --nb-ports=1 --mbcache=200 --mbuf-size=2048 --max-pkt-len=9600" % portMask, ) - port = self.tester.get_local_port(valports[0]) - txItf = self.tester.get_interface(port) - - port = self.tester.get_local_port(valports[1]) - rxItf = self.tester.get_interface(port) - self.tester.send_expect("ifconfig %s mtu %s" % (txItf, 9200), "# ") - self.tester.send_expect("ifconfig %s mtu %s" % (rxItf, 9200), "# ") - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + port = self.tg_node.get_local_port(valports[0]) + txItf = self.tg_node.get_interface(port) + + port = self.tg_node.get_local_port(valports[1]) + rxItf = self.tg_node.get_interface(port) + self.tg_node.send_expect("ifconfig %s mtu %s" % (txItf, 9200), "# ") + self.tg_node.send_expect("ifconfig %s mtu %s" % (rxItf, 9200), "# ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) - self.dut.send_expect( + self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 / tcp flags spec 0x02 flags mask 0x02 / end actions queue index 2 / end", "validated", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp flags spec 0x02 flags mask 0x02 / end actions queue index 2 / end", "created", ) @@ -4902,7 +4902,7 @@ class TestGeneric_flow_api(TestCase): "pf", expect_rxpkts="1", expect_queue="0", verify_mac="ff:ff:ff:ff:ff:ff" ) # destroy rule - 
self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") self.sendpkt( pktstr='Ether(dst="%s")/IP(src="2.2.2.5",dst="2.2.2.4")/TCP(dport=80,flags="S")/Raw(load="\x50"*8962)' % self.pf_mac @@ -4911,10 +4911,10 @@ class TestGeneric_flow_api(TestCase): self.verify_result( "pf", expect_rxpkts="1", expect_queue="0", verify_mac=self.pf_mac ) - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") - self.tester.send_expect("ifconfig %s mtu %s" % (txItf, 1500), "# ") - self.tester.send_expect("ifconfig %s mtu %s" % (rxItf, 1500), "# ") + self.tg_node.send_expect("ifconfig %s mtu %s" % (txItf, 1500), "# ") + self.tg_node.send_expect("ifconfig %s mtu %s" % (rxItf, 1500), "# ") def test_64_queues(self): @@ -4928,16 +4928,16 @@ class TestGeneric_flow_api(TestCase): "--disable-rss --rxq=64 --txq=64 --portmask=%s --nb-cores=4 --total-num-mbufs=%d" % (portMask, total_mbufs), ) - self.dut.send_expect("set stat_qmap rx %s 0 0" % valports[0], "testpmd> ") - self.dut.send_expect("set stat_qmap rx %s 0 0" % valports[1], "testpmd> ") - self.dut.send_expect("vlan set strip off %s" % valports[0], "testpmd> ") - self.dut.send_expect("vlan set strip off %s" % valports[1], "testpmd> ") - self.dut.send_expect("vlan set filter off %s" % valports[0], "testpmd> ") - self.dut.send_expect("vlan set filter off %s" % valports[1], "testpmd> ") + self.sut_node.send_expect("set stat_qmap rx %s 0 0" % valports[0], "testpmd> ") + self.sut_node.send_expect("set stat_qmap rx %s 0 0" % valports[1], "testpmd> ") + self.sut_node.send_expect("vlan set strip off %s" % valports[0], "testpmd> ") + self.sut_node.send_expect("vlan set strip off %s" % valports[1], "testpmd> ") + self.sut_node.send_expect("vlan set filter off %s" % valports[0], "testpmd> ") + self.sut_node.send_expect("vlan set filter off %s" % valports[1], "testpmd> ") queue = ["16", "32", "64"] for i in [0, 1, 2]: if i == 2: - out = 
self.dut.send_expect( + out = self.sut_node.send_expect( "set stat_qmap rx %s %s %s" % (valports[0], queue[i], (i + 1)), "testpmd> ", ) @@ -4951,13 +4951,13 @@ class TestGeneric_flow_api(TestCase): ) + "end actions queue index {} / end".format(queue[i]) ) - out = self.dut.send_expect(cmd, "testpmd> ") + out = self.sut_node.send_expect(cmd, "testpmd> ") if "Invalid argument" not in out: set_filter_flag = 0 break continue else: - self.dut.send_expect( + self.sut_node.send_expect( "set stat_qmap rx %s %s %s" % (valports[0], queue[i], (i + 1)), "testpmd> ", ) @@ -4968,8 +4968,8 @@ class TestGeneric_flow_api(TestCase): ) + "end actions queue index {} / end".format(queue[i]) ) - self.dut.send_expect(cmd, "testpmd> ") - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect(cmd, "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ", 120) global filters_index filters_index = i if filters_index == 0: @@ -4983,14 +4983,14 @@ class TestGeneric_flow_api(TestCase): % self.pf_mac ) time.sleep(1) - out = self.dut.send_expect("stop", "testpmd> ") + out = self.sut_node.send_expect("stop", "testpmd> ") p = re.compile(r"Forward Stats for RX Port= \d+/Queue=(\s?\d+)") res = p.findall(out) queues = [int(i) for i in res] if queues[0] != int(queue[i]): packet_flag = 0 break - self.dut.send_expect("quit", "#", timeout=30) + self.sut_node.send_expect("quit", "#", timeout=30) self.verify(set_filter_flag == 1, "set filters error") self.verify(packet_flag == 1, "packet pass assert error") else: @@ -5164,12 +5164,12 @@ class TestGeneric_flow_api(TestCase): Run after each test case. """ self.destroy_env() - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() - self.dut.close_session(self.session_secondary) - self.dut.close_session(self.session_third) + self.sut_node.kill_all() + self.sut_node.close_session(self.session_secondary) + self.sut_node.close_session(self.session_third) diff --git a/tests/TestSuite_hello_world.py b/tests/TestSuite_hello_world.py index 6f931d92..dc1c6f64 100644 --- a/tests/TestSuite_hello_world.py +++ b/tests/TestSuite_hello_world.py @@ -18,8 +18,8 @@ class TestHelloWorld(TestCase): hello_world Prerequisites: helloworld build pass """ - out = self.dut.build_dpdk_apps("examples/helloworld") - self.app_helloworld_path = self.dut.apps_name["helloworld"] + out = self.sut_node.build_dpdk_apps("examples/helloworld") + self.app_helloworld_path = self.sut_node.apps_name["helloworld"] self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -38,10 +38,10 @@ class TestHelloWorld(TestCase): """ # get the mask for the first core - cores = self.dut.get_core_list("1S/1C/1T") - eal_para = self.dut.create_eal_parameters(cores="1S/1C/1T") + cores = self.sut_node.get_core_list("1S/1C/1T") + eal_para = self.sut_node.create_eal_parameters(cores="1S/1C/1T") cmdline = "./%s %s" % (self.app_helloworld_path, eal_para) - out = self.dut.send_expect(cmdline, "# ", 30) + out = self.sut_node.send_expect(cmdline, "# ", 30) self.verify( "hello from core %s" % cores[0] in out, "EAL not started on core%s" % cores[0], @@ -54,11 +54,11 @@ class TestHelloWorld(TestCase): """ # get the maximum logical core number - cores = self.dut.get_core_list("all") - eal_para = self.dut.create_eal_parameters(cores=cores) + cores = self.sut_node.get_core_list("all") + eal_para = self.sut_node.create_eal_parameters(cores=cores) cmdline = "./%s %s " % (self.app_helloworld_path, eal_para) - out = self.dut.send_expect(cmdline, "# ", 50) + out = self.sut_node.send_expect(cmdline, "# ", 50) for core in cores: self.verify( "hello from core %s" % core in out, diff 
--git a/tests/TestSuite_hotplug.py b/tests/TestSuite_hotplug.py index 0f1e17fd..11099a81 100644 --- a/tests/TestSuite_hotplug.py +++ b/tests/TestSuite_hotplug.py @@ -12,7 +12,7 @@ import re import time import framework.utils as utils -from framework.packet import Packet +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -25,24 +25,24 @@ class TestPortHotPlug(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - cores = self.dut.get_core_list("1S/4C/1T") - self.eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + cores = self.sut_node.get_core_list("1S/4C/1T") + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/4C/1T") self.coremask = utils.create_mask(cores) - self.port = len(self.dut_ports) - 1 + self.port = len(self.sut_ports) - 1 if self.drivername == "vfio-pci:noiommu": self.driver_name = "vfio-pci" else: self.driver_name = self.drivername - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ Run before each test case. 
""" - self.dut.send_expect( - "./usertools/dpdk-devbind.py -u %s" % self.dut.ports_info[self.port]["pci"], + self.sut_node.send_expect( + "./usertools/dpdk-devbind.py -u %s" % self.sut_node.ports_info[self.port]["pci"], "#", 60, ) @@ -52,25 +52,25 @@ class TestPortHotPlug(TestCase): attach port """ # dpdk hotplug discern NIC by pci bus and include domid - out = self.dut.send_expect( - "port attach %s" % self.dut.ports_info[port]["pci"], "testpmd>", 60 + out = self.sut_node.send_expect( + "port attach %s" % self.sut_node.ports_info[port]["pci"], "testpmd>", 60 ) self.verify("is attached" in out, "Failed to attach") - out = self.dut.send_expect("port start %s" % port, "testpmd>", 120) + out = self.sut_node.send_expect("port start %s" % port, "testpmd>", 120) self.verify("Configuring Port" in out, "Failed to start port") # sleep 10 seconds for Intel® Ethernet 700 Series update link stats time.sleep(10) - self.dut.send_expect("show port info %s" % port, "testpmd>", 60) + self.sut_node.send_expect("show port info %s" % port, "testpmd>", 60) def detach(self, port): """ detach port """ - out = self.dut.send_expect("port stop %s" % port, "testpmd>", 60) + out = self.sut_node.send_expect("port stop %s" % port, "testpmd>", 60) self.verify("Stopping ports" in out, "Failed to stop port") # sleep 10 seconds for Intel® Ethernet 700 Series update link stats time.sleep(10) - out = self.dut.send_expect("port detach %s" % port, "testpmd>", 60) + out = self.sut_node.send_expect("port detach %s" % port, "testpmd>", 60) self.verify("is detached" in out, "Failed to detach port") def test_after_attach(self): @@ -78,114 +78,114 @@ class TestPortHotPlug(TestCase): first run testpmd after attach port """ cmd = "%s %s -- -i" % (self.path, self.eal_para) - self.dut.send_expect(cmd, "testpmd>", 60) - session_secondary = self.dut.new_session() + self.sut_node.send_expect(cmd, "testpmd>", 60) + session_secondary = self.sut_node.new_session() session_secondary.send_expect( 
"./usertools/dpdk-devbind.py --bind=%s %s" - % (self.driver_name, self.dut.ports_info[self.port]["pci"]), + % (self.driver_name, self.sut_node.ports_info[self.port]["pci"]), "#", 60, ) - self.dut.close_session(session_secondary) + self.sut_node.close_session(session_secondary) self.attach(self.port) - self.dut.send_expect("start", "testpmd>", 60) - out = self.dut.send_expect("port detach %s" % self.port, "testpmd>", 60) + self.sut_node.send_expect("start", "testpmd>", 60) + out = self.sut_node.send_expect("port detach %s" % self.port, "testpmd>", 60) self.verify("Port not stopped" in out, "able to detach port without stopping") - self.dut.send_expect("stop", "testpmd>", 60) + self.sut_node.send_expect("stop", "testpmd>", 60) self.detach(self.port) self.attach(self.port) - self.dut.send_expect("start", "testpmd>", 60) - out = self.dut.send_expect("port detach %s" % self.port, "testpmd>", 60) + self.sut_node.send_expect("start", "testpmd>", 60) + out = self.sut_node.send_expect("port detach %s" % self.port, "testpmd>", 60) self.verify("Port not stopped" in out, "able to detach port without stopping") - self.dut.send_expect("clear port stats %s" % self.port, "testpmd>", 60) + self.sut_node.send_expect("clear port stats %s" % self.port, "testpmd>", 60) self.send_packet(self.port) - out = self.dut.send_expect("show port stats %s" % self.port, "testpmd>", 60) + out = self.sut_node.send_expect("show port stats %s" % self.port, "testpmd>", 60) packet = re.search("RX-packets:\s*(\d*)", out) sum_packet = packet.group(1) self.verify(int(sum_packet) == 1, "Insufficient the received package") - self.dut.send_expect("quit", "#", 60) + self.sut_node.send_expect("quit", "#", 60) def send_packet(self, port): """ Send a packet to port """ - self.dmac = self.dut.get_mac_address(port) - txport = self.tester.get_local_port(port) - self.txItf = self.tester.get_interface(txport) - pkt = Packet(pkt_type="UDP") - pkt.config_layer( + self.dmac = self.sut_node.get_mac_address(port) + txport 
= self.tg_node.get_local_port(port) + self.txItf = self.tg_node.get_interface(txport) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer( "ether", { "dst": self.dmac, }, ) - pkt.send_pkt(self.tester, tx_port=self.txItf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.txItf) def test_before_attach(self): """ first attach port after run testpmd """ - session_secondary = self.dut.new_session() + session_secondary = self.sut_node.new_session() session_secondary.send_expect( "./usertools/dpdk-devbind.py --bind=%s %s" - % (self.driver_name, self.dut.ports_info[self.port]["pci"]), + % (self.driver_name, self.sut_node.ports_info[self.port]["pci"]), "#", 60, ) - self.dut.close_session(session_secondary) + self.sut_node.close_session(session_secondary) cmd = "%s %s -- -i" % (self.path, self.eal_para) - self.dut.send_expect(cmd, "testpmd>", 60) + self.sut_node.send_expect(cmd, "testpmd>", 60) self.detach(self.port) self.attach(self.port) - self.dut.send_expect("start", "testpmd>", 60) - out = self.dut.send_expect("port detach %s" % self.port, "testpmd>", 60) + self.sut_node.send_expect("start", "testpmd>", 60) + out = self.sut_node.send_expect("port detach %s" % self.port, "testpmd>", 60) self.verify("Port not stopped" in out, "able to detach port without stopping") - self.dut.send_expect("clear port stats %s" % self.port, "testpmd>", 60) + self.sut_node.send_expect("clear port stats %s" % self.port, "testpmd>", 60) self.send_packet(self.port) - out = self.dut.send_expect("show port stats %s" % self.port, "testpmd>", 60) + out = self.sut_node.send_expect("show port stats %s" % self.port, "testpmd>", 60) packet = re.search("RX-packets:\s*(\d*)", out) sum_packet = packet.group(1) self.verify(int(sum_packet) == 1, "Insufficient the received package") - self.dut.send_expect("quit", "#", 60) + self.sut_node.send_expect("quit", "#", 60) def test_port_detach_attach_for_vhost_user_virtio_user(self): vdev = 
"eth_vhost0,iface=vhost-net,queues=1" iface = "vhost-net1" - path = self.dut.base_dir + os.path.sep + iface + path = self.sut_node.base_dir + os.path.sep + iface path = path.replace("~", "/root") - self.dut.send_expect("rm -rf %s" % iface, "# ") - cores = self.dut.get_core_list("all") + self.sut_node.send_expect("rm -rf %s" % iface, "# ") + cores = self.sut_node.get_core_list("all") self.verify(len(cores) > 8, "insufficient cores for this case") - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( no_pci=True, cores=cores[1:5], vdevs=[vdev], prefix="vhost" ) testpmd_cmd = "%s " % self.path + eal_param + " -- -i" - self.dut.send_expect(testpmd_cmd, "testpmd>", timeout=60) - self.dut.send_expect("port stop 0", "testpmd>") - out = self.dut.send_expect("port detach 0", "testpmd>") + self.sut_node.send_expect(testpmd_cmd, "testpmd>", timeout=60) + self.sut_node.send_expect("port stop 0", "testpmd>") + out = self.sut_node.send_expect("port detach 0", "testpmd>") self.verify("Device is detached" in out, "Failed to detach") - stats = self.dut.send_expect( + stats = self.sut_node.send_expect( "ls %s" % path, "#", timeout=3, alt_session=True, verify=True ) self.verify(stats == 2, "port detach failed") time.sleep(1) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "port attach eth_vhost1,iface=%s,queues=1" % iface, "testpmd>" ) self.verify("Port 0 is attached." 
in out, "Failed to attach") - self.dut.send_expect("port start 0", "testpmd>") - out = self.dut.send_expect( + self.sut_node.send_expect("port start 0", "testpmd>") + out = self.sut_node.send_expect( "ls %s" % path, "#", timeout=3, alt_session=True, verify=True ) self.verify(iface in out, "port attach failed") - self.session2 = self.dut.create_session(name="virtio_user") - eal_param = self.dut.create_eal_parameters( + self.session2 = self.sut_node.create_session(name="virtio_user") + eal_param = self.sut_node.create_eal_parameters( no_pci=True, fixed_prefix="virtio1", cores=cores[5:9] ) - testpmd_cmd2 = "%s/%s " % (self.dut.base_dir, self.path) + eal_param + " -- -i" + testpmd_cmd2 = "%s/%s " % (self.sut_node.base_dir, self.path) + eal_param + " -- -i" self.session2.send_expect(testpmd_cmd2, "testpmd>", timeout=60) self.session2.send_expect( "port attach net_virtio_user1,mac=00:01:02:03:04:05,path=%s,queues=1,packed_vq=1,mrg_rxbuf=1,in_order=0" @@ -193,11 +193,11 @@ class TestPortHotPlug(TestCase): "testpmd", ) self.session2.send_expect("port start 0", "testpmd>", timeout=60) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ls %s" % path, "#", timeout=3, alt_session=True, verify=True ) self.verify(iface in out, "port attach failed") - self.dut.send_expect("start", "testpmd") + self.sut_node.send_expect("start", "testpmd") self.session2.send_expect("start tx_first 32", "testpmd") out = self.session2.send_expect("show port stats 0", "testpmd") rx_pkts = int(re.search("RX-packets: (\d+)", out).group(1)) @@ -210,18 +210,18 @@ class TestPortHotPlug(TestCase): self.session2.send_expect("show port stats 0", "testpmd", timeout=2) self.session2.send_expect("stop", "testpmd", timeout=2) self.session2.send_expect("quit", "#", timeout=2) - self.dut.send_expect("stop", "testpmd", timeout=2) - self.dut.send_expect("quit", "#", timeout=2) + self.sut_node.send_expect("stop", "testpmd", timeout=2) + self.sut_node.send_expect("quit", "#", timeout=2) 
self.session2.close() def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() - self.dut.send_expect( + self.sut_node.kill_all() + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --bind=%s %s" - % (self.driver_name, self.dut.ports_info[self.port]["pci"]), + % (self.driver_name, self.sut_node.ports_info[self.port]["pci"]), "#", 60, ) diff --git a/tests/TestSuite_hotplug_mp.py b/tests/TestSuite_hotplug_mp.py index 444f60a4..0ba79b07 100644 --- a/tests/TestSuite_hotplug_mp.py +++ b/tests/TestSuite_hotplug_mp.py @@ -21,21 +21,21 @@ class TestHotplugMp(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.intf0 = self.dut.ports_info[0]["intf"] - self.pci0 = self.dut.ports_info[0]["pci"] - out = self.dut.build_dpdk_apps("./examples/multi_process/hotplug_mp") - self.app_path = self.dut.apps_name["hotplug_mp"] + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.intf0 = self.sut_node.ports_info[0]["intf"] + self.pci0 = self.sut_node.ports_info[0]["pci"] + out = self.sut_node.build_dpdk_apps("./examples/multi_process/hotplug_mp") + self.app_path = self.sut_node.apps_name["hotplug_mp"] self.verify("Error" not in out, "Compilation failed") # Start one new session to run primary process - self.session_pri = self.dut.new_session() + self.session_pri = self.sut_node.new_session() # Start two new sessions to run secondary process - self.session_sec_1 = self.dut.new_session() - self.session_sec_2 = self.dut.new_session() - self.session_vhost = self.dut.new_session() + self.session_sec_1 = self.sut_node.new_session() + self.session_sec_2 = self.sut_node.new_session() + self.session_vhost = self.sut_node.new_session() if self.drivername != "": - self.dut.bind_interfaces_linux(self.kdriver) + self.sut_node.bind_interfaces_linux(self.kdriver) def set_up(self): """ @@ 
-178,9 +178,9 @@ class TestHotplugMp(TestCase): # bound to pmd if opt_plug in ["plugin", "hotplug", "crossplug"]: self.multi_process_setup() - self.dut.bind_interfaces_linux(self.drivername) + self.sut_node.bind_interfaces_linux(self.drivername) elif opt_plug == "plugout": - self.dut.bind_interfaces_linux(self.drivername) + self.sut_node.bind_interfaces_linux(self.drivername) self.multi_process_setup() time.sleep(3) if opt_plug in ["plugin", "plugout"]: @@ -198,7 +198,7 @@ class TestHotplugMp(TestCase): self.attach_detach(process, 1, "plugout", flg_loop, dev) self.multi_process_quit() - self.dut.bind_interfaces_linux(self.kdriver) + self.sut_node.bind_interfaces_linux(self.kdriver) def attach_detach_vdev( self, @@ -216,8 +216,8 @@ class TestHotplugMp(TestCase): EAL: Driver cannot attach the device (net_af_packet) """ if not iface: - self.dut.send_expect("ifconfig %s up" % self.intf0, "#") - self.verify(self.dut.is_interface_up(intf=self.intf0), "Wrong link status") + self.sut_node.send_expect("ifconfig %s up" % self.intf0, "#") + self.verify(self.sut_node.is_interface_up(intf=self.intf0), "Wrong link status") self.multi_process_setup() for i in range(test_loop): @@ -365,9 +365,9 @@ class TestHotplugMp(TestCase): """ vdev = "net_virtio_user0" self.path = "/home/vhost-net" - pmd_path = self.dut.apps_name["test-pmd"] + pmd_path = self.sut_node.apps_name["test-pmd"] self.session_vhost.send_expect("rm -rf %s" % self.path, "#") - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( no_pci=True, prefix="vhost", vdevs=["eth_vhost0,iface=%s" % self.path] ) param = " -- -i" @@ -410,8 +410,8 @@ class TestHotplugMp(TestCase): self.logger.info(e) raise Exception(e) finally: - self.dut.send_expect("rm -rf %s" % self.path, "#") - self.dut.kill_all() + self.sut_node.send_expect("rm -rf %s" % self.path, "#") + self.sut_node.kill_all() def tear_down(self): """ @@ -423,6 +423,6 @@ class TestHotplugMp(TestCase): """ Run after each test suite. 
""" - self.dut.bind_interfaces_linux(self.drivername) - self.dut.close_session(self.dut) - self.dut.kill_all() + self.sut_node.bind_interfaces_linux(self.drivername) + self.sut_node.close_session(self.sut_node) + self.sut_node.kill_all() diff --git a/tests/TestSuite_i40e_rss_input.py b/tests/TestSuite_i40e_rss_input.py index 4a88af1b..1c8cfd08 100644 --- a/tests/TestSuite_i40e_rss_input.py +++ b/tests/TestSuite_i40e_rss_input.py @@ -24,7 +24,7 @@ import random import re import time -import framework.dut as dut +import framework.sut_node as sut import framework.utils as utils from framework.pmd_output import PmdOutput @@ -66,13 +66,13 @@ class TestI40ERssInput(TestCase): ], "NIC Unsupported: " + str(self.nic), ) - ports = self.dut.get_ports(self.nic) + ports = self.sut_node.get_ports(self.nic) self.verify(len(ports) >= 1, "Not enough ports available") - dutPorts = self.dut.get_ports(self.nic) - self.dut_ports = dutPorts - localPort = self.tester.get_local_port(dutPorts[0]) - self.itf = self.tester.get_interface(localPort) - self.pmdout = PmdOutput(self.dut) + sutPorts = self.sut_node.get_ports(self.nic) + self.sut_ports = sutPorts + localPort = self.tg_node.get_local_port(sutPorts[0]) + self.itf = self.tg_node.get_interface(localPort) + self.pmdout = PmdOutput(self.sut_node) def set_up(self): """ @@ -86,9 +86,9 @@ class TestI40ERssInput(TestCase): Sends packets. 
""" global reta_lines - self.tester.scapy_foreground() - self.dut.send_expect("start", "testpmd>") - mac = self.dut.get_mac_address(0) + self.tg_node.scapy_foreground() + self.sut_node.send_expect("start", "testpmd>") + mac = self.sut_node.get_mac_address(0) if "ipv4-dst-only" in inputsets: dstip4 = '"192.168.0.2"' @@ -128,8 +128,8 @@ class TestI40ERssInput(TestCase): r', proto=47)/GRE(key_present=1,proto=2048,key=67108863)/IP()], iface="%s")' % (itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-tcp": packet = r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src=' % ( @@ -144,8 +144,8 @@ class TestI40ERssInput(TestCase): packet += r",dport=" packet += dstport packet += r')], iface="%s")' % (itf) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-tcp-sym": packet = r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src=' % ( @@ -160,8 +160,8 @@ class TestI40ERssInput(TestCase): packet += r",dport=" packet += srcport packet += r')], iface="%s")' % (itf) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-udp": packet = r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src=' % ( @@ -176,8 +176,8 @@ class TestI40ERssInput(TestCase): packet += r",dport=" packet += dstport packet += r')], iface="%s")' % (itf) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-sctp": packet = r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src=' % ( @@ -192,8 +192,8 @@ class TestI40ERssInput(TestCase): packet += r",dport=" packet += dstport packet += r')], iface="%s")' % (itf) - 
self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-other": packet = r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src=' % ( @@ -204,8 +204,8 @@ class TestI40ERssInput(TestCase): packet += r", dst=" packet += dstip6 packet += r')], iface="%s")' % (itf) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-tcp": packet = r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src=' % ( @@ -220,8 +220,8 @@ class TestI40ERssInput(TestCase): packet += r",dport=" packet += dstport packet += r')], iface="%s")' % (itf) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-udp": packet = r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src=' % ( @@ -236,8 +236,8 @@ class TestI40ERssInput(TestCase): packet += r",dport=" packet += dstport packet += r')], iface="%s")' % (itf) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-sctp": packet = r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src=' % ( @@ -252,14 +252,14 @@ class TestI40ERssInput(TestCase): packet += r",dport=" packet += dstport packet += r')], iface="%s")' % (itf) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) else: print("\ntran_type error!\n") - out = self.dut.get_session_output(timeout=1) - self.dut.send_expect("stop", "testpmd>") + out = self.sut_node.get_session_output(timeout=1) + self.sut_node.send_expect("stop", "testpmd>") lines = out.split("\r\n") reta_line = {} # collect the hash result and the queue id @@ -310,15 
+310,15 @@ class TestI40ERssInput(TestCase): """ Create testpmd command """ - app_name = self.dut.apps_name["test-pmd"] - eal_params = self.dut.create_eal_parameters( - cores="1S/4C/1T", ports=[self.dut_ports[0]] + app_name = self.sut_node.apps_name["test-pmd"] + eal_params = self.sut_node.create_eal_parameters( + cores="1S/4C/1T", ports=[self.sut_ports[0]] ) cmd = app_name + eal_params + "-- -i --portmask=0x1 --rxq=4 --txq=4" - self.dut.send_expect(cmd, "testpmd> ", 30) - self.dut.send_expect("set verbose 8", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("start", "testpmd> ", 10) + self.sut_node.send_expect(cmd, "testpmd> ", 30) + self.sut_node.send_expect("set verbose 8", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ", 10) time.sleep(2) res = self.pmdout.wait_link_status_up("all") @@ -332,8 +332,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable default input set - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable default input set + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end func symmetric_toeplitz queues end / end", "testpmd> ", ) @@ -341,7 +341,7 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp-sym", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -366,8 +366,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only end queues end / 
end", "testpmd> ", ) @@ -376,14 +376,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -423,8 +423,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-dst-only end queues end / end", "testpmd> ", ) @@ -433,14 +433,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -480,8 +480,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / 
ipv4 / tcp / end actions rss types ipv4-tcp l4-src-only end queues end / end", "testpmd> ", ) @@ -490,14 +490,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -537,8 +537,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l4-dst-only end queues end / end", "testpmd> ", ) @@ -547,14 +547,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -594,8 +594,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, 
enable src-ipv4, dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only l3-dst-only end queues end / end", "testpmd> ", ) @@ -604,14 +604,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -651,8 +651,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only l4-src-only end queues end / end", "testpmd> ", ) @@ -661,14 +661,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -708,8 +708,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() 
- # set hash input set by testpmd on dut, enable src-ipv4, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -718,14 +718,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -765,8 +765,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -775,14 +775,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) 
self.result_table_print() result_rows = self.result_table_getrows() @@ -822,8 +822,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -832,14 +832,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -879,8 +879,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l4-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -889,14 +889,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) 
self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -936,8 +936,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -946,14 +946,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -993,8 +993,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -1003,14 +1003,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + 
self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1050,8 +1050,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -1060,14 +1060,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1107,8 +1107,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -1117,14 +1117,14 @@ class 
TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-tcp", inputsets) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1164,22 +1164,22 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) inputsets = ["ipv4-src-only", "ipv4-dst-only", "l4-dst-only", "l4-src-only"] self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1204,8 +1204,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4 + self.sut_node.send_expect( "flow create 0 
ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only end queues end / end", "testpmd> ", ) @@ -1214,14 +1214,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1261,8 +1261,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-dst-only end queues end / end", "testpmd> ", ) @@ -1271,14 +1271,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1318,8 +1318,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port - self.dut.send_expect( + # set hash input set 
by testpmd on SUT, enable src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l4-src-only end queues end / end", "testpmd> ", ) @@ -1328,14 +1328,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1375,8 +1375,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l4-dst-only end queues end / end", "testpmd> ", ) @@ -1385,14 +1385,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1432,8 +1432,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by 
testpmd on dut, enable src-ipv4, dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only l3-dst-only end queues end / end", "testpmd> ", ) @@ -1442,14 +1442,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1489,8 +1489,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only l4-src-only end queues end / end", "testpmd> ", ) @@ -1499,14 +1499,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() 
result_rows = self.result_table_getrows() @@ -1546,8 +1546,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -1556,14 +1556,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1603,8 +1603,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -1613,14 +1613,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) 
self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1660,8 +1660,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -1670,14 +1670,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1717,8 +1717,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l4-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -1727,14 +1727,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + 
self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1774,8 +1774,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -1784,14 +1784,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1831,8 +1831,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -1841,14 +1841,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", 
inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1888,8 +1888,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -1898,14 +1898,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -1945,8 +1945,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / 
udp / end actions rss types ipv4-udp l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -1955,14 +1955,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-udp", inputsets) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2002,22 +2002,22 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) inputsets = ["ipv4-src-only", "ipv4-dst-only", "l4-dst-only", "l4-src-only"] self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2042,8 +2042,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, 
enable src-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-src-only end queues end / end", "testpmd> ", ) @@ -2052,14 +2052,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2099,8 +2099,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-dst-only end queues end / end", "testpmd> ", ) @@ -2109,14 +2109,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2156,8 +2156,8 @@ 
class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l4-src-only end queues end / end", "testpmd> ", ) @@ -2166,14 +2166,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2213,8 +2213,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l4-dst-only end queues end / end", "testpmd> ", ) @@ -2223,14 +2223,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 
30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2270,8 +2270,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-src-only l3-dst-only end queues end / end", "testpmd> ", ) @@ -2280,14 +2280,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2327,8 +2327,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-src-only l4-src-only end queues end / end", "testpmd> ", ) @@ -2337,14 +2337,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end 
/ end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2384,8 +2384,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -2394,14 +2394,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2441,8 +2441,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -2451,14 +2451,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow 
destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2498,8 +2498,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -2508,14 +2508,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2555,8 +2555,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l4-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -2565,14 +2565,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) 
self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2612,8 +2612,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-src-only l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -2622,14 +2622,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2669,8 +2669,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / 
sctp / end actions rss types ipv4-sctp l3-src-only l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -2679,14 +2679,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2726,8 +2726,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-src-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -2736,14 +2736,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2783,8 +2783,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by 
testpmd on dut, enable dst-ipv4, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -2793,14 +2793,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-sctp", inputsets) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2840,22 +2840,22 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp l3-src-only l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) inputsets = ["ipv4-src-only", "ipv4-dst-only", "l4-dst-only", "l4-src-only"] self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-sctp", inputsets) - self.dut.send_expect("quit", "# 
", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2880,8 +2880,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-src-only end queues end / end", "testpmd> ", ) @@ -2890,14 +2890,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2937,8 +2937,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-dst-only end queues end / end", "testpmd> ", ) @@ -2947,14 +2947,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) 
self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -2994,8 +2994,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l4-src-only end queues end / end", "testpmd> ", ) @@ -3004,14 +3004,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3051,8 +3051,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l4-dst-only end queues end / end", "testpmd> ", ) @@ -3061,14 +3061,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / 
ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3108,8 +3108,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-src-only l3-dst-only end queues end / end", "testpmd> ", ) @@ -3118,14 +3118,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3165,8 +3165,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-src-only l4-src-only end queues end / end", "testpmd> ", ) @@ -3175,14 +3175,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - 
self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3222,8 +3222,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -3232,14 +3232,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3279,8 +3279,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -3289,14 +3289,14 @@ class TestI40ERssInput(TestCase): 
self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3336,8 +3336,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -3346,14 +3346,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3393,8 +3393,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end 
actions rss types ipv6-tcp l4-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -3403,14 +3403,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3450,8 +3450,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-src-only l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -3460,14 +3460,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3507,8 +3507,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6, 
dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-src-only l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -3517,14 +3517,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3564,8 +3564,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-src-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -3574,14 +3574,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) 
self.result_table_print() result_rows = self.result_table_getrows() @@ -3621,8 +3621,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -3631,14 +3631,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-tcp", inputsets) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3678,22 +3678,22 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp l3-src-only l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) inputsets = ["ipv6-src-only", "ipv6-dst-only", "l4-dst-only", "l4-src-only"] self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / 
tcp / end actions rss types ipv6-tcp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-tcp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3718,8 +3718,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-src-only end queues end / end", "testpmd> ", ) @@ -3728,14 +3728,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3775,8 +3775,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-dst-only end queues end / end", "testpmd> ", ) @@ -3785,14 +3785,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", 
"testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3832,8 +3832,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l4-src-only end queues end / end", "testpmd> ", ) @@ -3842,14 +3842,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3889,8 +3889,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l4-dst-only end queues end / end", "testpmd> ", ) @@ -3899,14 +3899,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> 
") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -3946,8 +3946,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-src-only l3-dst-only end queues end / end", "testpmd> ", ) @@ -3956,14 +3956,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4003,8 +4003,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-src-only l4-src-only end queues end / end", "testpmd> ", ) @@ -4013,14 +4013,14 @@ class TestI40ERssInput(TestCase): 
self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4060,8 +4060,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -4070,14 +4070,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4117,8 +4117,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end 
actions rss types ipv6-udp l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -4127,14 +4127,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4174,8 +4174,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -4184,14 +4184,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4231,8 +4231,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port, dst-port - self.dut.send_expect( + # set 
hash input set by testpmd on SUT, enable src-port, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l4-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -4241,14 +4241,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4288,8 +4288,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-src-only l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -4298,14 +4298,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ 
-4345,8 +4345,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-src-only l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -4355,14 +4355,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4402,8 +4402,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-src-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -4412,14 +4412,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) 
self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4459,8 +4459,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -4469,14 +4469,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-udp", inputsets) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4516,22 +4516,22 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp l3-src-only l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) inputsets = ["ipv6-src-only", "ipv6-dst-only", "l4-dst-only", "l4-src-only"] self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + 
self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-udp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4556,8 +4556,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-src-only end queues end / end", "testpmd> ", ) @@ -4566,14 +4566,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4613,8 +4613,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-dst-only end queues end / end", "testpmd> ", ) @@ -4623,14 +4623,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", 
inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4670,8 +4670,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l4-src-only end queues end / end", "testpmd> ", ) @@ -4680,14 +4680,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4727,8 +4727,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l4-dst-only end queues end / end", "testpmd> ", ) @@ -4737,14 +4737,14 @@ class 
TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4784,8 +4784,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-src-only l3-dst-only end queues end / end", "testpmd> ", ) @@ -4794,14 +4794,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4841,8 +4841,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, src-port + self.sut_node.send_expect( "flow create 0 
ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-src-only l4-src-only end queues end / end", "testpmd> ", ) @@ -4851,14 +4851,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4898,8 +4898,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -4908,14 +4908,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -4955,8 +4955,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, 
enable dst-ipv6, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -4965,14 +4965,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5012,8 +5012,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -5022,14 +5022,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() 
result_rows = self.result_table_getrows() @@ -5069,8 +5069,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-port, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-port, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l4-src-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -5079,14 +5079,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5126,8 +5126,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-src-only l3-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -5136,14 +5136,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues 
end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5183,8 +5183,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6, dst-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6, dst-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-src-only l3-dst-only l4-dst-only end queues end / end", "testpmd> ", ) @@ -5193,14 +5193,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5240,8 +5240,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-src-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -5250,14 +5250,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", 
"testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5297,8 +5297,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-dst-only l4-dst-only l4-src-only end queues end / end", "testpmd> ", ) @@ -5307,14 +5307,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-sctp", inputsets) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5354,22 +5354,22 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, dst-ipv6, dst-port, src-port - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6, dst-port, src-port + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp l3-src-only l3-dst-only l4-dst-only 
l4-src-only end queues end / end", "testpmd> ", ) inputsets = ["ipv6-src-only", "ipv6-dst-only", "l4-dst-only", "l4-src-only"] self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-sctp", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5394,8 +5394,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-other l3-src-only end queues end / end", "testpmd> ", ) @@ -5404,14 +5404,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-other", inputsets) self.send_packet(self.itf, "ipv4-other", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-other end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-other", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5451,8 +5451,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern 
eth / ipv4 / end actions rss types ipv4-other l3-dst-only end queues end / end", "testpmd> ", ) @@ -5461,14 +5461,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv4-other", inputsets) self.send_packet(self.itf, "ipv4-other", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-other end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-other", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5508,22 +5508,22 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv4, dst-ipv4 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv4, dst-ipv4 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-other l3-src-only l3-dst-only end queues end / end", "testpmd> ", ) inputsets = ["ipv4-src-only", "ipv4-dst-only"] self.send_packet(self.itf, "ipv4-other", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-other end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv4-other", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5548,8 +5548,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6 + 
self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-other l3-src-only end queues end / end", "testpmd> ", ) @@ -5558,14 +5558,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-other", inputsets) self.send_packet(self.itf, "ipv6-other", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-other end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-other", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5605,8 +5605,8 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-other l3-dst-only end queues end / end", "testpmd> ", ) @@ -5615,14 +5615,14 @@ class TestI40ERssInput(TestCase): self.send_packet(self.itf, "ipv6-other", inputsets) self.send_packet(self.itf, "ipv6-other", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-other end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-other", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5662,22 +5662,22 @@ class TestI40ERssInput(TestCase): self.start_testpmd() - # set hash input set by testpmd on dut, enable src-ipv6, 
dst-ipv6 - self.dut.send_expect( + # set hash input set by testpmd on SUT, enable src-ipv6, dst-ipv6 + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-other l3-src-only l3-dst-only end queues end / end", "testpmd> ", ) inputsets = ["ipv6-src-only", "ipv6-dst-only"] self.send_packet(self.itf, "ipv6-other", inputsets) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-other end queues end / end", "testpmd> ", ) self.send_packet(self.itf, "ipv6-other", inputsets) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) self.result_table_print() result_rows = self.result_table_getrows() @@ -5700,25 +5700,25 @@ class TestI40ERssInput(TestCase): """ self.start_testpmd() - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-dst-only end queues end / end", "testpmd> ", ) self.verify("Flow rule validated" in out, "Failed to validated!") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow validate 0 ingress pattern end actions rss types end queues 0 1 end / end", "testpmd> ", ) self.verify("Flow rule validated" in out, "Failed to validated!") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow validate 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-dst-only end queues 0 1 end / end", "testpmd> ", ) self.verify("Flow rule validated" not in out, "Failed to validated!") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def test_flow_query(self): """ @@ -5726,25 +5726,25 @@ class TestI40ERssInput(TestCase): """ self.start_testpmd() - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress 
pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only end queues end func symmetric_toeplitz / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues end func simple_xor / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 1 2 end / end", "testpmd> ", ) rexp = r"flow query 0 (\d) rss\r\r\nRSS:\r\n queues: ([\S\s]+?)\r\n function: (\S+?)\r\n types:\r\n ([\s\S]+)" - out0 = self.dut.send_expect("flow query 0 0 rss", "testpmd> ") + out0 = self.sut_node.send_expect("flow query 0 0 rss", "testpmd> ") m0 = re.match(rexp, out0.strip()) self.verify( "none" == m0.group(2) @@ -5752,7 +5752,7 @@ class TestI40ERssInput(TestCase): and "ipv4-tcp" == m0.group(4), "Query error", ) - out1 = self.dut.send_expect("flow query 0 1 rss", "testpmd> ") + out1 = self.sut_node.send_expect("flow query 0 1 rss", "testpmd> ") m1 = re.match(rexp, out1.strip()) self.verify( "none" == m1.group(2) @@ -5761,7 +5761,7 @@ class TestI40ERssInput(TestCase): and "l3-src-only" in m1.group(4), "Query error", ) - out2 = self.dut.send_expect("flow query 0 2 rss", "testpmd> ") + out2 = self.sut_node.send_expect("flow query 0 2 rss", "testpmd> ") m2 = re.match(rexp, out2.strip()) self.verify( "none" == m2.group(2) @@ -5769,24 +5769,24 @@ class TestI40ERssInput(TestCase): and "none" == m2.group(4), "Query error", ) - out3 = self.dut.send_expect("flow query 0 3 rss", "testpmd> ") + out3 = self.sut_node.send_expect("flow query 0 3 rss", "testpmd> ") m3 = re.match(rexp, out3.strip()) self.verify( "1 2" == m3.group(2) and "default" == m3.group(3) and "none" == m3.group(4), "Query error", ) - self.dut.send_expect("flow flush 0", "testpmd> ") - out4 = 
self.dut.send_expect("flow query 0 0 rss", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") + out4 = self.sut_node.send_expect("flow query 0 0 rss", "testpmd> ") self.verify("Flow rule #0 not found" in out4, "Failed to rss query!") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ @@ -5798,7 +5798,7 @@ class TestI40ERssInput(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_iavf.py b/tests/TestSuite_iavf.py index 5444a7e3..c4c5de67 100644 --- a/tests/TestSuite_iavf.py +++ b/tests/TestSuite_iavf.py @@ -14,7 +14,6 @@ import random import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput from framework.settings import HEADER_SIZE, get_nic_name from framework.test_case import TestCase @@ -28,8 +27,8 @@ class TestIavf(TestCase): supported_vf_driver = ["pci-stub", "vfio-pci"] def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) > 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) > 1, "Insufficient ports") self.vm0 = None self.env_done = False self.interrupt_flag = False @@ -41,11 +40,11 @@ class TestIavf(TestCase): self.wrong_mac = "11:22:33:44:55:66" self.loading_sizes = [128, 800, 801, 1700, 2500] self.ETHER_JUMBO_FRAME_MTU = 9000 - self.tester_intf0 = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.tg_intf0 = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - self.tester_intf1 = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[1]) + self.tg_intf1 = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[1]) ) # set vf assign 
method and vf driver @@ -57,7 +56,7 @@ class TestIavf(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.setup_vm_env() def set_up(self): @@ -70,39 +69,39 @@ class TestIavf(TestCase): if self.env_done: return try: - self.dut.send_expect("rmmod igb_uio", "# ", 60) - self.dut.send_expect("insmod %s/kmod/igb_uio.ko" % self.target, "# ", 60) + self.sut_node.send_expect("rmmod igb_uio", "# ", 60) + self.sut_node.send_expect("insmod %s/kmod/igb_uio.ko" % self.target, "# ", 60) except Exception as e: raise Exception(e) - self.pf_pci0 = self.dut.ports_info[0]["pci"] - self.pf_pci1 = self.dut.ports_info[1]["pci"] + self.pf_pci0 = self.sut_node.ports_info[0]["pci"] + self.pf_pci1 = self.sut_node.ports_info[1]["pci"] # bind to default driver - self.dut.ports_info[0]["port"].bind_driver("igb_uio") - self.dut.ports_info[1]["port"].bind_driver("igb_uio") - self.dut.generate_sriov_vfs_by_port(self.dut_ports[0], 1, "igb_uio") - self.dut.generate_sriov_vfs_by_port(self.dut_ports[1], 1, "igb_uio") - self.vf0_port = self.dut.ports_info[0]["vfs_port"] - self.vf1_port = self.dut.ports_info[1]["vfs_port"] - self.vf0_port_pci = self.dut.ports_info[0]["sriov_vfs_pci"][0] - self.vf1_port_pci = self.dut.ports_info[1]["sriov_vfs_pci"][0] + self.sut_node.ports_info[0]["port"].bind_driver("igb_uio") + self.sut_node.ports_info[1]["port"].bind_driver("igb_uio") + self.sut_node.generate_sriov_vfs_by_port(self.sut_ports[0], 1, "igb_uio") + self.sut_node.generate_sriov_vfs_by_port(self.sut_ports[1], 1, "igb_uio") + self.vf0_port = self.sut_node.ports_info[0]["vfs_port"] + self.vf1_port = self.sut_node.ports_info[1]["vfs_port"] + self.vf0_port_pci = self.sut_node.ports_info[0]["sriov_vfs_pci"][0] + self.vf1_port_pci = self.sut_node.ports_info[1]["sriov_vfs_pci"][0] # start testpmd for pf - self.dut_testpmd = PmdOutput(self.dut) + self.sut_testpmd = 
PmdOutput(self.sut_node) host_eal_param = "-a %s -a %s" % (self.pf_pci0, self.pf_pci1) - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "Default", "--rxq=4 --txq=4 --port-topology=chained", eal_param=host_eal_param, ) # set vf mac - self.dut_testpmd.execute_cmd("set vf mac addr 0 0 %s" % self.vf0_mac) - self.dut_testpmd.execute_cmd("set vf mac addr 1 0 %s" % self.vf1_mac) - self.used_dut_port_0 = self.dut_ports[0] - self.used_dut_port_1 = self.dut_ports[1] - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] - self.sriov_vfs_port_1 = self.dut.ports_info[self.used_dut_port_1]["vfs_port"] + self.sut_testpmd.execute_cmd("set vf mac addr 0 0 %s" % self.vf0_mac) + self.sut_testpmd.execute_cmd("set vf mac addr 1 0 %s" % self.vf1_mac) + self.used_sut_port_0 = self.sut_ports[0] + self.used_sut_port_1 = self.sut_ports[1] + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + self.sriov_vfs_port_1 = self.sut_node.ports_info[self.used_sut_port_1]["vfs_port"] try: for port in self.sriov_vfs_port_0: port.bind_driver(self.vf_driver) @@ -114,40 +113,40 @@ class TestIavf(TestCase): vf1_prop = {"opt_host": self.sriov_vfs_port_1[0].pci} # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "iavf") + self.vm0 = VM(self.sut_node, "vm0", "iavf") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") except Exception as e: self.destroy_vm_env() raise Exception(e) - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.env_done = True def destroy_vm_env(self): if getattr(self, "vm0", None): - if getattr(self, "vm_dut_0", None): - 
self.vm_dut_0.kill_all() + if getattr(self, "vm_sut_0", None): + self.vm_sut_0.kill_all() self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() self.vm0 = None - if getattr(self, "used_dut_port_0", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) - port = self.dut.ports_info[self.used_dut_port_0]["port"] - self.used_dut_port_0 = None - if getattr(self, "used_dut_port_1", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_1) - port = self.dut.ports_info[self.used_dut_port_1]["port"] - self.used_dut_port_1 = None - self.bind_nic_driver(self.dut_ports[:2], driver="default") + if getattr(self, "used_sut_port_0", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) + port = self.sut_node.ports_info[self.used_sut_port_0]["port"] + self.used_sut_port_0 = None + if getattr(self, "used_sut_port_1", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_1) + port = self.sut_node.ports_info[self.used_sut_port_1]["port"] + self.used_sut_port_1 = None + self.bind_nic_driver(self.sut_ports[:2], driver="default") self.env_done = False def send_packet(self, mac, itf, tran_type="udp", count=1, pktLength=64, VID=100): @@ -161,8 +160,8 @@ class TestIavf(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src="192.168.0.%d", ' 'dst="192.168.0.%d")], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) if tran_type == "tcp": for i in range(count): @@ -171,8 +170,8 @@ class TestIavf(TestCase): 'TCP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) if tran_type == "ip/udp": for i 
in range(count): @@ -180,8 +179,8 @@ class TestIavf(TestCase): r'sendp([Ether(dst="%s")/IP()/UDP()/Raw("X"*%s)], iface="%s")' % (mac, pktLength, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) if tran_type == "vlan": for i in range(count): @@ -189,8 +188,8 @@ class TestIavf(TestCase): r'sendp(Ether(src="00:00:20:00:00:00", dst="%s")/Dot1Q(id=0x8100,vlan=%s)/IP()/UDP()/' 'Raw(load="XXXXXXXXXXXXXX"), iface="%s")' % (mac, VID, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "udp": for i in range(count): @@ -199,8 +198,8 @@ class TestIavf(TestCase): 'UDP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) time.sleep(1) @@ -210,43 +209,43 @@ class TestIavf(TestCase): """ scanner = 'tcpdump -vv -r tcpdump_{iface}.pcap 2>/dev/null | grep "seq" | grep "length"' scanner_result = scanner.format(**locals()) - scanner_result = self.tester.send_expect(scanner_result, "#") + scanner_result = self.tg_node.send_expect(scanner_result, "#") fially_result = re.findall(r"length( \d+)", scanner_result) return list(fially_result) def number_of_packets(self, iface): """ By reading the file generated by tcpdump it counts how many packets were - forwarded by the sample app and received in the self.tester. The sample app + forwarded by the sample app and received in the self.tg_node. The sample app will add a known MAC address for the test to look for. 
""" command = ( 'tcpdump -A -nn -e -v -r tcpdump_{iface}.pcap 2>/dev/null | grep -c "seq"' ) command_result = command.format(**locals()) - result = self.tester.send_expect(command_result, "#") + result = self.tg_node.send_expect(command_result, "#") return int(result.strip()) def tcpdump_stop_sniff(self): """ Stop the tcpdump process running in the background. """ - self.tester.send_expect("killall tcpdump", "#") + self.tg_node.send_expect("killall tcpdump", "#") time.sleep(1) - self.tester.send_expect('echo "Cleaning buffer"', "#") + self.tg_node.send_expect('echo "Cleaning buffer"', "#") time.sleep(1) def get_tcpdump_vlan(self): command = ("tcpdump -A -nn -e -v -r tcpdump_{0}.pcap 2>/dev/null").format( - self.tester_intf0 + self.tg_intf0 ) - result = self.tester.send_expect(command, "#") + result = self.tg_node.send_expect(command, "#") return result def tcpdump_start_sniffing(self, ifaces=[]): """ - Start tcpdump in the background to sniff the tester interface where - the packets are transmitted to and from the self.dut. + Start tcpdump in the background to sniff the TG interface where + the packets are transmitted to and from the self.sut_node. All the captured packets are going to be stored in a file for a post-analysis. 
""" @@ -255,17 +254,17 @@ class TestIavf(TestCase): iface ) del_cmd = ("rm -f tcpdump_{0}.pcap").format(iface) - self.tester.send_expect(del_cmd, "#") - self.tester.send_expect(command, "#") + self.tg_node.send_expect(del_cmd, "#") + self.tg_node.send_expect(command, "#") def test_vf_basic_rx_tx(self): self.vm0_testpmd.start_testpmd(VM_CORES_MASK) - self.vf_mac = self.vm0_testpmd.get_port_mac(self.vm0_dut_ports[0]) + self.vf_mac = self.vm0_testpmd.get_port_mac(self.vm0_sut_ports[0]) self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("set fwd mac") self.vm0_testpmd.execute_cmd("start") - self.send_packet(self.vf1_mac, self.tester_intf1, "ip/udp", count=10) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf1_mac, self.tg_intf1, "ip/udp", count=10) + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -277,13 +276,13 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("set allmulti all off") self.vm0_testpmd.execute_cmd("set fwd mac") self.vm0_testpmd.execute_cmd("start") - self.send_packet(self.wrong_mac, self.tester_intf1, "ip/udp", count=10) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.wrong_mac, self.tg_intf1, "ip/udp", count=10) + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 0, "Not receive expected packet") - self.send_packet(self.vf1_mac, self.tester_intf1, "ip/udp", count=10) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf1_mac, self.tg_intf1, "ip/udp", count=10) + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -293,18 +292,18 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("set allmulti all off") self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("start") - 
self.send_packet(self.vf0_mac, self.tester_intf0, "ip/udp") - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf0_mac, self.tg_intf0, "ip/udp") + out = self.vm_sut_0.get_session_output() self.verify(self.vf0_mac in out, "vf receive pkt fail with current mac") - self.send_packet(self.multicast_mac, self.tester_intf0, "ip/udp") - out = self.vm_dut_0.get_session_output() + self.send_packet(self.multicast_mac, self.tg_intf0, "ip/udp") + out = self.vm_sut_0.get_session_output() self.verify(self.multicast_mac not in out, "vf receive pkt with multicast mac") self.vm0_testpmd.execute_cmd("set allmulti all on") - self.send_packet(self.vf0_mac, self.tester_intf0, "ip/udp") - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf0_mac, self.tg_intf0, "ip/udp") + out = self.vm_sut_0.get_session_output() self.verify(self.vf0_mac in out, "vf receive pkt fail with current mac") - self.send_packet(self.multicast_mac, self.tester_intf0, "ip/udp") - out = self.vm_dut_0.get_session_output(timeout=2) + self.send_packet(self.multicast_mac, self.tg_intf0, "ip/udp") + out = self.vm_sut_0.get_session_output(timeout=2) self.verify(self.multicast_mac in out, "vf receive pkt fail with multicast mac") def test_vf_broadcast(self): @@ -313,8 +312,8 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("set fwd mac") self.vm0_testpmd.execute_cmd("start") - self.send_packet(self.broadcast_mac, self.tester_intf0, "ip/udp", count=10) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.broadcast_mac, self.tg_intf0, "ip/udp", count=10) + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -323,11 +322,11 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("set promisc all on") self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("start") - self.send_packet(self.vf0_mac, 
self.tester_intf0, "ip/udp") - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf0_mac, self.tg_intf0, "ip/udp") + out = self.vm_sut_0.get_session_output() self.verify(self.vf0_mac in out, "vf receive pkt with current mac") - self.send_packet(self.promiscuous_mac, self.tester_intf0, "ip/udp") - out = self.vm_dut_0.get_session_output(timeout=2) + self.send_packet(self.promiscuous_mac, self.tg_intf0, "ip/udp") + out = self.vm_sut_0.get_session_output(timeout=2) self.verify( self.promiscuous_mac in out, "vf receive pkt fail with different mac" ) @@ -347,14 +346,14 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("start") # send 10 vlan tagged packets, and can't forward the packets - self.send_packet(self.vf0_mac, self.tester_intf0, "vlan", count=10, VID=200) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf0_mac, self.tg_intf0, "vlan", count=10, VID=200) + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 0, "Not receive expected packet") # send 10 untagged packets, and forward the packets - self.send_packet(self.vf0_mac, self.tester_intf0, "ip/udp", count=10) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf0_mac, self.tg_intf0, "ip/udp", count=10) + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -375,23 +374,23 @@ class TestIavf(TestCase): # send 10 vid20 tagged packets, and can forward the packets self.send_packet( - self.vf0_mac, self.tester_intf0, "vlan", count=10, pktLength=100, VID=20 + self.vf0_mac, self.tg_intf0, "vlan", count=10, pktLength=100, VID=20 ) - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") # send 10 vid200 tagged packets, and can't forward the packets 
self.send_packet( - self.vf0_mac, self.tester_intf0, "vlan", count=10, pktLength=100, VID=200 + self.vf0_mac, self.tg_intf0, "vlan", count=10, pktLength=100, VID=200 ) - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 0, "Not receive expected packet") # send 10 udp packets, and can forward the packets - self.send_packet(self.vf0_mac, self.tester_intf0, "udp", count=10) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf0_mac, self.tg_intf0, "udp", count=10) + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -408,8 +407,8 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("tx_vlan set 0 20") self.vm0_testpmd.execute_cmd("port start all") self.vm0_testpmd.execute_cmd("start") - self.tcpdump_start_sniffing([self.tester_intf0]) - self.send_packet(self.vf1_mac, self.tester_intf1, "ip/udp") + self.tcpdump_start_sniffing([self.tg_intf0]) + self.send_packet(self.vf1_mac, self.tg_intf1, "ip/udp") self.tcpdump_stop_sniff() out = self.get_tcpdump_vlan() self.verify(self.vf0_mac and "vlan 20" in out, "vlan tag not in out") @@ -427,16 +426,16 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("vlan set strip on 0") self.vm0_testpmd.execute_cmd("port start all") self.vm0_testpmd.execute_cmd("start") - self.tcpdump_start_sniffing([self.tester_intf0]) - self.send_packet(self.vf1_mac, self.tester_intf1, "vlan") + self.tcpdump_start_sniffing([self.tg_intf0]) + self.send_packet(self.vf1_mac, self.tg_intf1, "vlan") self.tcpdump_stop_sniff() out = self.get_tcpdump_vlan() self.verify("vlan 100" not in out and self.vf0_mac in out, "vlan tag in out") # disable strip self.vm0_testpmd.execute_cmd("vlan set strip off 1") - self.tcpdump_start_sniffing([self.tester_intf0]) - self.send_packet(self.vf1_mac, self.tester_intf1, "vlan") + 
self.tcpdump_start_sniffing([self.tg_intf0]) + self.send_packet(self.vf1_mac, self.tg_intf1, "vlan") self.tcpdump_stop_sniff() out = self.get_tcpdump_vlan() self.verify("vlan 100" in out and self.vf0_mac in out, "vlan tag not in out") @@ -455,14 +454,14 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("start") # send 10 tagged packets, and check 10 tagged packets received - self.send_packet(self.vf1_mac, self.tester_intf1, "vlan", count=10, VID=100) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf1_mac, self.tg_intf1, "vlan", count=10, VID=100) + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") # send 10 untagged packets, and check 10 untagged packets received - self.send_packet(self.vf1_mac, self.tester_intf1, "udp", count=10) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf1_mac, self.tg_intf1, "udp", count=10) + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -472,12 +471,12 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("start") - # set tester port mtu - self.tester.send_expect( - "ifconfig %s mtu %d" % (self.tester_intf0, self.ETHER_JUMBO_FRAME_MTU), "# " + # set TG port mtu + self.tg_node.send_expect( + "ifconfig %s mtu %d" % (self.tg_intf0, self.ETHER_JUMBO_FRAME_MTU), "# " ) - self.tester.send_expect( - "ifconfig %s mtu %d" % (self.tester_intf1, self.ETHER_JUMBO_FRAME_MTU), "# " + self.tg_node.send_expect( + "ifconfig %s mtu %d" % (self.tg_intf1, self.ETHER_JUMBO_FRAME_MTU), "# " ) # send 10 1518 size packets, and check 10 packets received @@ -486,9 +485,9 @@ class TestIavf(TestCase): pktLength - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - HEADER_SIZE["udp"] ) self.send_packet( - self.vf1_mac, self.tester_intf1, "ip/udp", count=10, 
pktLength=payload + self.vf1_mac, self.tg_intf1, "ip/udp", count=10, pktLength=payload ) - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -498,9 +497,9 @@ class TestIavf(TestCase): pktLength - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - HEADER_SIZE["udp"] ) self.send_packet( - self.vf1_mac, self.tester_intf1, "ip/udp", count=10, pktLength=payload + self.vf1_mac, self.tg_intf1, "ip/udp", count=10, pktLength=payload ) - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 0, "Not receive expected packet") @@ -512,12 +511,12 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("set fwd mac") self.vm0_testpmd.execute_cmd("start") - # set tester port mtu - self.tester.send_expect( - "ifconfig %s mtu %d" % (self.tester_intf0, self.ETHER_JUMBO_FRAME_MTU), "# " + # set TG port mtu + self.tg_node.send_expect( + "ifconfig %s mtu %d" % (self.tg_intf0, self.ETHER_JUMBO_FRAME_MTU), "# " ) - self.tester.send_expect( - "ifconfig %s mtu %d" % (self.tester_intf1, self.ETHER_JUMBO_FRAME_MTU), "# " + self.tg_node.send_expect( + "ifconfig %s mtu %d" % (self.tg_intf1, self.ETHER_JUMBO_FRAME_MTU), "# " ) # send 10 1517 size packets, and check 10 packets received @@ -526,9 +525,9 @@ class TestIavf(TestCase): pktLength - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - HEADER_SIZE["udp"] ) self.send_packet( - self.vf1_mac, self.tester_intf1, "ip/udp", count=10, pktLength=payload + self.vf1_mac, self.tg_intf1, "ip/udp", count=10, pktLength=payload ) - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -538,9 +537,9 @@ class TestIavf(TestCase): pktLength - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - 
HEADER_SIZE["udp"] ) self.send_packet( - self.vf1_mac, self.tester_intf1, "ip/udp", count=10, pktLength=payload + self.vf1_mac, self.tg_intf1, "ip/udp", count=10, pktLength=payload ) - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -550,9 +549,9 @@ class TestIavf(TestCase): pktLength - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - HEADER_SIZE["udp"] ) self.send_packet( - self.vf1_mac, self.tester_intf1, "ip/udp", count=10, pktLength=payload + self.vf1_mac, self.tg_intf1, "ip/udp", count=10, pktLength=payload ) - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -562,23 +561,23 @@ class TestIavf(TestCase): pktLength - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - HEADER_SIZE["udp"] ) self.send_packet( - self.vf1_mac, self.tester_intf1, "ip/udp", count=10, pktLength=4500 + self.vf1_mac, self.tg_intf1, "ip/udp", count=10, pktLength=4500 ) - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 0, "Not receive expected packet") def validate_checksum_packet(self): normal_checksum_values = {} checksum_pattern = re.compile("chksum.*=.*(0x[0-9a-z]+)") - self.tester.send_expect("scapy", ">>> ") + self.tg_node.send_expect("scapy", ">>> ") for packet in normal_packets: - self.tester.send_expect("p = %s" % normal_packets[packet], ">>>") - out = self.tester.send_expect("p.show2()", ">>>") + self.tg_node.send_expect("p = %s" % normal_packets[packet], ">>>") + out = self.tg_node.send_expect("p.show2()", ">>>") chksums = checksum_pattern.findall(out) if chksums: normal_checksum_values[packet] = chksums - self.tester.send_expect("exit()", "#") + self.tg_node.send_expect("exit()", "#") for index 
in normal_checksum_values: self.logger.info( "Good checksum value for %s Packet is: %s" @@ -588,14 +587,14 @@ class TestIavf(TestCase): # Send bad checksum packters and check if the checksum fields are correct. corrected_checksum_values = {} for packet in checksum_error_packets: - inst = self.tester.tcpdump_sniff_packets(self.tester_intf0) - self.tester.scapy_foreground() - self.tester.scapy_append( + inst = self.tg_node.tcpdump_sniff_packets(self.tg_intf0) + self.tg_node.scapy_foreground() + self.tg_node.scapy_append( 'sendp([%s], iface="%s")' - % (checksum_error_packets[packet], self.tester_intf1) + % (checksum_error_packets[packet], self.tg_intf1) ) - self.tester.scapy_execute() - rec_pkt = self.tester.load_tcpdump_sniff_packets(inst) + self.tg_node.scapy_execute() + rec_pkt = self.tg_node.load_tcpdump_sniff_packets(inst) # collect checksum values for received packet chksum = ( rec_pkt[0] @@ -705,8 +704,8 @@ class TestIavf(TestCase): self.validate_checksum_packet() def test_vf_tso(self): - self.tester.send_expect( - "ifconfig %s mtu %d" % (self.tester_intf0, self.ETHER_JUMBO_FRAME_MTU), "#" + self.tg_node.send_expect( + "ifconfig %s mtu %d" % (self.tg_intf0, self.ETHER_JUMBO_FRAME_MTU), "#" ) self.vm0_testpmd.start_testpmd( VM_CORES_MASK, " --max-pkt-len=%s " % self.ETHER_JUMBO_FRAME_MTU @@ -729,20 +728,20 @@ class TestIavf(TestCase): self.vm0_testpmd.execute_cmd("port start all") self.vm0_testpmd.execute_cmd("set promisc all off") self.vm0_testpmd.execute_cmd("start") - self.tester.scapy_foreground() + self.tg_node.scapy_foreground() time.sleep(5) for loading_size in self.loading_sizes: - self.tcpdump_start_sniffing([self.tester_intf0, self.tester_intf1]) - self.tester.scapy_append( + self.tcpdump_start_sniffing([self.tg_intf0, self.tg_intf1]) + self.tg_node.scapy_append( 'sendp([Ether(dst="%s",src="52:00:00:00:00:00")/IP(src="192.168.1.1",dst="192.168.1.2") \ /TCP(sport=1021,dport=1021)/("X"*%s)], iface="%s")' - % (self.vf0_mac, loading_size, 
self.tester_intf0) + % (self.vf0_mac, loading_size, self.tg_intf0) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() self.tcpdump_stop_sniff() - rx_stats = self.number_of_packets(self.tester_intf1) - tx_stats = self.number_of_packets(self.tester_intf0) - tx_outlist = self.number_of_bytes(self.tester_intf1) + rx_stats = self.number_of_packets(self.tg_intf1) + tx_stats = self.number_of_packets(self.tg_intf0) + tx_outlist = self.number_of_bytes(self.tg_intf1) self.logger.info(tx_outlist) if loading_size <= 800: self.verify( @@ -793,34 +792,34 @@ class TestIavf(TestCase): for pkt_type in pkt_types: self.vm0_testpmd.execute_cmd("port config all rss %s" % pkt_type) self.vm0_testpmd.execute_cmd("start") - self.send_packet(self.vf1_mac, self.tester_intf1, pkt_type, count=30) + self.send_packet(self.vf1_mac, self.tg_intf1, pkt_type, count=30) time.sleep(2) - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() self.verify_packet_number(out) self.vm0_testpmd.execute_cmd("clear port stats all") def test_vf_rx_interrupt(self): # build l3fwd-power - self.vm_dut_alt = self.vm_dut_0.create_session(name="vm_dut_alt") - out = self.vm_dut_0.build_dpdk_apps("./examples/l3fwd-power") + self.vm_sut_alt = self.vm_sut_0.create_session(name="vm_sut_alt") + out = self.vm_sut_0.build_dpdk_apps("./examples/l3fwd-power") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") - self.vm_dut_0.unbind_interfaces_linux() - self.vm_dut_0.send_expect("modprobe vfio", "# ", 60) - self.vm_dut_0.send_expect("modprobe -r vfio_iommu_type1", "# ", 60) - self.vm_dut_0.send_expect( + self.vm_sut_0.unbind_interfaces_linux() + self.vm_sut_0.send_expect("modprobe vfio", "# ", 60) + self.vm_sut_0.send_expect("modprobe -r vfio_iommu_type1", "# ", 60) + self.vm_sut_0.send_expect( "modprobe vfio enable_unsafe_noiommu_mode=1", "# ", 60 ) - self.vm_dut_0.send_expect("modprobe vfio-pci", "# ", 60) - 
self.vm_dut_0.bind_interfaces_linux(driver="vfio-pci") + self.vm_sut_0.send_expect("modprobe vfio-pci", "# ", 60) + self.vm_sut_0.bind_interfaces_linux(driver="vfio-pci") # start l3fwd-power l3fwd_app = "./examples/l3fwd-power/build/l3fwd-power" cmd = l3fwd_app + " -l 0,1 -n 4 -- -p 0x3 --config '(0,0,0),(1,0,1)'" - self.vm_dut_0.send_expect(cmd, "POWER", timeout=40) + self.vm_sut_0.send_expect(cmd, "POWER", timeout=40) time.sleep(10) - self.send_packet(self.vf0_mac, self.tester_intf0, "ip/udp") - self.send_packet(self.vf1_mac, self.tester_intf1, "ip/udp") - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf0_mac, self.tg_intf0, "ip/udp") + self.send_packet(self.vf1_mac, self.tg_intf1, "ip/udp") + out = self.vm_sut_0.get_session_output() self.verify( "L3FWD_POWER: lcore 0 is waked up from rx interrupt" in out, "lcore 0 is not waked up", @@ -837,9 +836,9 @@ class TestIavf(TestCase): "L3FWD_POWER: lcore 1 sleeps until interrupt triggers" in out, "lcore 1 not sleep", ) - self.send_packet(self.vf0_mac, self.tester_intf0, "udp", count=10) - self.send_packet(self.vf1_mac, self.tester_intf1, "udp", count=10) - out = self.vm_dut_0.get_session_output() + self.send_packet(self.vf0_mac, self.tg_intf0, "udp", count=10) + self.send_packet(self.vf1_mac, self.tg_intf1, "udp", count=10) + out = self.vm_sut_0.get_session_output() self.verify( "L3FWD_POWER: lcore 0 is waked up from rx interrupt" in out, "lcore 0 is not waked up", @@ -848,19 +847,19 @@ class TestIavf(TestCase): "L3FWD_POWER: lcore 1 is waked up from rx interrupt" in out, "lcore 1 is not waked up", ) - self.vm_dut_alt.send_expect("killall l3fwd-power", "# ", 10) - self.vm_dut_0.bind_interfaces_linux(driver="igb_uio") + self.vm_sut_alt.send_expect("killall l3fwd-power", "# ", 10) + self.vm_sut_0.bind_interfaces_linux(driver="igb_uio") self.interrupt_flag = True def tear_down(self): if self.running_case == "test_vf_rx_interrupt": - self.vm_dut_alt.send_expect("killall l3fwd-power", "# ", 10) - 
self.vm_dut_alt.close() + self.vm_sut_alt.send_expect("killall l3fwd-power", "# ", 10) + self.vm_sut_alt.close() else: self.vm0_testpmd.quit() def tear_down_all(self): - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") if self.env_done is True: self.destroy_vm_env() self.env_done = False diff --git a/tests/TestSuite_iavf_flexible_descriptor.py b/tests/TestSuite_iavf_flexible_descriptor.py index 8fe8fe0e..eba458ad 100644 --- a/tests/TestSuite_iavf_flexible_descriptor.py +++ b/tests/TestSuite_iavf_flexible_descriptor.py @@ -23,69 +23,69 @@ class TestIavfFlexibleDescriptor(TestCase, FlexibleRxdBase): Modify the dpdk code. """ cmds = [ - "cd " + self.dut.base_dir, + "cd " + self.sut_node.base_dir, "cp ./app/test-pmd/util.c .", r"""sed -i "/if dpdk_conf.has('RTE_NET_IXGBE')/i\if dpdk_conf.has('RTE_NET_ICE')\n\tdeps += ['net_ice', 'net_iavf']\nendif" app/test-pmd/meson.build""", "sed -i '/#include /a\#include ' app/test-pmd/util.c", "sed -i '/if (ol_flags & PKT_RX_RSS_HASH)/i\ rte_pmd_ifd_dump_proto_xtr_metadata(mb);' app/test-pmd/util.c", ] - [self.dut.send_expect(cmd, "#", 15, alt_session=True) for cmd in cmds] - self.dut.build_install_dpdk(self.dut.target) + [self.sut_node.send_expect(cmd, "#", 15, alt_session=True) for cmd in cmds] + self.sut_node.build_install_dpdk(self.sut_node.target) def restore_compilation(self): """ Resume editing operation. 
""" cmds = [ - "cd " + self.dut.base_dir, + "cd " + self.sut_node.base_dir, "cp ./util.c ./app/test-pmd/", "sed -i '/pmd_iavf/d' app/test-pmd/meson.build", "rm -rf ./util.c", ] - [self.dut.send_expect(cmd, "#", 15, alt_session=True) for cmd in cmds] - self.dut.build_install_dpdk(self.dut.target) + [self.sut_node.send_expect(cmd, "#", 15, alt_session=True) for cmd in cmds] + self.sut_node.build_install_dpdk(self.sut_node.target) def create_vf(self): # vf relevant content - dut_index = 0 - used_dut_port = self.dut_ports[dut_index] - self.dut.send_expect("modprobe vfio-pci", "#") + sut_index = 0 + used_sut_port = self.sut_ports[sut_index] + self.sut_node.send_expect("modprobe vfio-pci", "#") # bind pf to kernel - for port in self.dut_ports: - netdev = self.dut.ports_info[port]["port"] + for port in self.sut_ports: + netdev = self.sut_node.ports_info[port]["port"] netdev.bind_driver(driver=self.kdriver) # set vf assign method and vf driver vf_driver = "vfio-pci" - self.pf0_intf = self.dut.ports_info[self.dut_ports[dut_index]]["intf"] + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[sut_index]]["intf"] # get priv-flags default stats if self.is_eth_series_nic(800): self.flag = "vf-vlan-pruning" else: self.flag = "vf-vlan-prune-disable" - self.default_stats = self.dut.get_priv_flags_state(self.pf0_intf, self.flag) + self.default_stats = self.sut_node.get_priv_flags_state(self.pf0_intf, self.flag) if self.is_eth_series_nic(800) and self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s off" % (self.pf0_intf, self.flag), "# " ) else: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.pf0_intf, self.flag), "# " ) # generate 2 VFs on PF - self.dut.generate_sriov_vfs_by_port(used_dut_port, 1, driver=self.kdriver) + self.sut_node.generate_sriov_vfs_by_port(used_sut_port, 1, driver=self.kdriver) vf_mac = "00:11:22:33:44:55" - self.dut.send_expect( + self.sut_node.send_expect( 
"ip link set {} vf 0 mac {}".format(self.pf0_intf, vf_mac), "#" ) - sriov_vf0 = self.dut.ports_info[used_dut_port]["vfs_port"][0] + sriov_vf0 = self.sut_node.ports_info[used_sut_port]["vfs_port"][0] sriov_vf0.bind_driver(vf_driver) return sriov_vf0, vf_mac def destroy_vf(self): try: port_id = 0 - self.dut.destroy_sriov_vfs_by_port(port_id) - port_obj = self.dut.ports_info[port_id]["port"] + self.sut_node.destroy_sriov_vfs_by_port(port_id) + port_obj = self.sut_node.ports_info[port_id]["port"] port_obj.bind_driver(self.drivername) except Exception as e: self.logger.info(traceback.format_exc()) @@ -95,8 +95,8 @@ class TestIavfFlexibleDescriptor(TestCase, FlexibleRxdBase): """ run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.preset_compilation() self.sriov_vf0, vf_mac = self.create_vf() self.init_base(self.sriov_vf0.pci, vf_mac, "iavf") @@ -108,7 +108,7 @@ class TestIavfFlexibleDescriptor(TestCase, FlexibleRxdBase): self.destroy_vf() self.restore_compilation() if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.pf0_intf, self.flag, self.default_stats), "# ", @@ -126,7 +126,7 @@ class TestIavfFlexibleDescriptor(TestCase, FlexibleRxdBase): """ self.close_testpmd() time.sleep(2) - self.dut.kill_all() + self.sut_node.kill_all() @skip_unsupported_pkg("os default") def test_check_single_VLAN_fields_in_RXD_8021Q(self): diff --git a/tests/TestSuite_iavf_package_driver_error_handle.py b/tests/TestSuite_iavf_package_driver_error_handle.py index 37efa3e0..eea414a8 100644 --- a/tests/TestSuite_iavf_package_driver_error_handle.py +++ b/tests/TestSuite_iavf_package_driver_error_handle.py @@ -18,9 +18,9 @@ class Testiavf_package_and_driver_check(TestCase): self.nic in 
["ICE_100G-E810C_QSFP", "ICE_25G-E810C_SFP"], "NIC Unsupported: " + str(self.nic), ) - self.dut_ports = self.dut.get_ports(self.nic) - self.used_dut_port = self.dut_ports[0] - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.used_sut_port = self.sut_ports[0] + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.PF_QUEUE = 16 conf_file = os.path.join(CONFIG_ROOT_PATH, "iavf_driver_package.cfg") @@ -28,22 +28,22 @@ class Testiavf_package_and_driver_check(TestCase): conf_session = conf_peer.conf._sections["suite"] self.driverPath_latest = conf_session["ice_driver_file_location_latest"] self.driverPath_old = conf_session["ice_driver_ice_10_rc17_driver"] - localPort0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_p0 = self.tester.get_interface(localPort0) - self.tester.send_expect("ifconfig %s -promisc" % self.tester_p0, "#") + localPort0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_p0 = self.tg_node.get_interface(localPort0) + self.tg_node.send_expect("ifconfig %s -promisc" % self.tg_p0, "#") - self.dut_p0_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.tester_p0_mac = self.tester.get_mac(localPort0) - self.dut_testpmd = PmdOutput(self.dut) + self.sut_p0_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.tg_p0_mac = self.tg_node.get_mac(localPort0) + self.sut_testpmd = PmdOutput(self.sut_node) self.pkg_file1 = "/lib/firmware/intel/ice/ddp/ice.pkg" self.pkg_file2 = "/lib/firmware/updates/intel/ice/ddp/ice.pkg" - out = self.dut.send_expect("ls %s" % self.pkg_file1, "#") + out = self.sut_node.send_expect("ls %s" % self.pkg_file1, "#") self.verify( "No such file or directory" not in out, "Cannot find %s, please check you system/driver." 
% self.pkg_file1, ) - out = self.dut.send_expect("ls %s" % self.pkg_file2, "#") + out = self.sut_node.send_expect("ls %s" % self.pkg_file2, "#") self.verify( "No such file or directory" not in out, "Cannot find %s, please check you system/driver." % self.pkg_file2, @@ -60,10 +60,10 @@ class Testiavf_package_and_driver_check(TestCase): """ backup_file = "/opt/ice.pkg_backup" if flag == "backup": - self.dut.send_expect("\cp %s %s" % (self.pkg_file1, backup_file), "#") + self.sut_node.send_expect("\cp %s %s" % (self.pkg_file1, backup_file), "#") else: - self.dut.send_expect("\cp %s %s" % (backup_file, self.pkg_file1), "#") - self.dut.send_expect("\cp %s %s" % (backup_file, self.pkg_file2), "#") + self.sut_node.send_expect("\cp %s %s" % (backup_file, self.pkg_file1), "#") + self.sut_node.send_expect("\cp %s %s" % (backup_file, self.pkg_file2), "#") def use_correct_ice_pkg(self, flag="true"): """ @@ -73,20 +73,20 @@ class Testiavf_package_and_driver_check(TestCase): if flag == "true": self.backup_recover_ice_pkg("recover") else: - self.dut.send_expect("rm -rf %s" % self.pkg_file1, "#") - self.dut.send_expect("touch %s" % self.pkg_file1, "#") - self.dut.send_expect("rm -rf %s" % self.pkg_file2, "#") - self.dut.send_expect("touch %s" % self.pkg_file2, "#") + self.sut_node.send_expect("rm -rf %s" % self.pkg_file1, "#") + self.sut_node.send_expect("touch %s" % self.pkg_file1, "#") + self.sut_node.send_expect("rm -rf %s" % self.pkg_file2, "#") + self.sut_node.send_expect("touch %s" % self.pkg_file2, "#") def start_testpmd(self, ice_pkg="true", safe_mode_support="false"): self.eal_param = "" if safe_mode_support == "true": - for i in range(len(self.dut_ports)): + for i in range(len(self.sut_ports)): self.eal_param = ( self.eal_param - + "-a %s,safe-mode-support=1 " % self.dut.ports_info[i]["pci"] + + "-a %s,safe-mode-support=1 " % self.sut_node.ports_info[i]["pci"] ) - out = self.dut_testpmd.start_testpmd( + out = self.sut_testpmd.start_testpmd( "all", "--nb-cores=8 --rxq=%s 
--txq=%s --port-topology=chained" % (self.PF_QUEUE, self.PF_QUEUE), @@ -109,13 +109,13 @@ class Testiavf_package_and_driver_check(TestCase): error_message in out, "There should be error messages in out: %s" % out, ) - self.dut_testpmd.execute_cmd("set promisc all off") - self.dut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("set promisc all off") + self.sut_testpmd.execute_cmd("set verbose 1") def number_of_packets(self, iface): """ By reading the file generated by tcpdump it counts how many packets were - forwarded by the sample app and received in the self.tester. The sample app + forwarded by the sample app and received in the self.tg_node. The sample app will add a known MAC address for the test to look for. """ command = ( @@ -129,112 +129,112 @@ class Testiavf_package_and_driver_check(TestCase): Sends packets. """ self.loading_size = 30 - self.tester.scapy_foreground() + self.tg_node.scapy_foreground() if tran_type == "ipv4-other": for i in range(1): packet = ( r'sendp([Ether(dst="%s", src="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/("X"*%s)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, self.loading_size, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-tcp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/TCP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-udp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IP(src="192.168.0.%d", 
dst="192.168.0.%d")/UDP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-sctp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/SCTP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-tcp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IPv6(src="::%d", dst="::%d")/TCP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-udp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IPv6(src="::%d", dst="::%d")/UDP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-sctp": for i in range(16): packet = ( r'sendp([Ether(dst="%s", src="%s")/IPv6(src="::%d", dst="::%d",nh=132)/SCTP(sport=1024,dport=1024)], iface="%s")' % ( - self.dut_p0_mac, - self.tester_p0_mac, + self.sut_p0_mac, + self.tg_p0_mac, i + 1, i + 2, - self.tester_p0, + 
self.tg_p0, ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) else: print("\ntran_type error!\n") @@ -248,11 +248,11 @@ class Testiavf_package_and_driver_check(TestCase): """ if tran_type == "ipv4-other": self.tcpdump_stop_sniff() - p0_stats = self.number_of_packets(self.tester_p0) - p1_stats = self.number_of_packets(self.tester_p1) - self.verify(p0_stats == p1_stats, "tester p0 and p1: packet number match") + p0_stats = self.number_of_packets(self.tg_p0) + p1_stats = self.number_of_packets(self.tg_p1) + self.verify(p0_stats == p1_stats, "TG p0 and p1: packet number match") else: - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() queue_list = [] lines = out.split("\r\n") for line in lines: @@ -280,17 +280,17 @@ class Testiavf_package_and_driver_check(TestCase): """ use wrong ice.pkg and start testpmd without "safe-mode-suppor", no port is loaded in testpmd """ - self.dut.bind_interfaces_linux("ice") + self.sut_node.bind_interfaces_linux("ice") self.use_correct_ice_pkg(flag="false") # import pdb # pdb.set_trace() - self.dut.send_expect("rmmod -f ice", "#") - self.dut.send_expect("insmod %s" % self.driverPath_latest, "#") - # self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2) - self.dut.bind_interfaces_linux("ice") - self.used_dut_port_pci = self.dut.ports_info[self.used_dut_port]["port"].pci - cmd = "echo 2 > /sys/bus/pci/devices/%s/sriov_numvfs" % self.used_dut_port_pci - out = self.dut.send_expect(cmd, "#", 60) + self.sut_node.send_expect("rmmod -f ice", "#") + self.sut_node.send_expect("insmod %s" % self.driverPath_latest, "#") + # self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2) + self.sut_node.bind_interfaces_linux("ice") + self.used_sut_port_pci = self.sut_node.ports_info[self.used_sut_port]["port"].pci + cmd = "echo 2 > /sys/bus/pci/devices/%s/sriov_numvfs" % self.used_sut_port_pci + out = 
self.sut_node.send_expect(cmd, "#", 60) # import pdb # pdb.set_trace() self.verify( @@ -303,24 +303,24 @@ class Testiavf_package_and_driver_check(TestCase): """ use wrong ice.pkg and start testpmd without "safe-mode-suppor", no port is loaded in testpmd """ - self.dut.bind_interfaces_linux("ice") + self.sut_node.bind_interfaces_linux("ice") self.use_correct_ice_pkg("true") - self.dut.send_expect("rmmod -f ice", "#") - self.dut.send_expect("insmod %s" % self.driverPath_old, "#") - self.used_dut_port_pci = self.dut.ports_info[self.used_dut_port]["port"].pci + self.sut_node.send_expect("rmmod -f ice", "#") + self.sut_node.send_expect("insmod %s" % self.driverPath_old, "#") + self.used_sut_port_pci = self.sut_node.ports_info[self.used_sut_port]["port"].pci - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver("vfio-pci") testpmdcmd = ( - self.dut.apps_name["test-pmd"] + self.sut_node.apps_name["test-pmd"] + "-l 6-9 -n 4 --file-prefix=vf -- -i --rxq=4 --txq=4 --nb-cores=2" ) - self.dut_testpmd.execute_cmd(testpmdcmd) - out = self.dut_testpmd.execute_cmd( + self.sut_testpmd.execute_cmd(testpmdcmd) + out = self.sut_testpmd.execute_cmd( "flow create 0 ingress pattern eth / ipv4 / end actions rss types l3-dst-only end key_len 0 queues end / end" ) self.verify( @@ -331,25 +331,25 @@ class Testiavf_package_and_driver_check(TestCase): def copy_specify_ice_pkg(self, pkg_ver): """ - Copy 2 different ``ice-xxx.pkg`` from dts/dep to dut /tmp/ + Copy 2 different ``ice-xxx.pkg`` from dts/dep to SUT /tmp/ pkg_files = ['ice-1.3.4.0.pkg', 'ice-1.3.10.0.pkg'] """ dst = "/tmp" pkg_file = "ice-%s.pkg" % pkg_ver src_file = r"./dep/%s" % pkg_file - self.dut.session.copy_file_to(src_file, dst) + 
self.sut_node.session.copy_file_to(src_file, dst) def generate_delete_specify_pkg(self, pkg_ver, sn, key="true"): - self.dut.send_expect("rm -rf /lib/firmware/intel/ice/ddp/ice-%s.pkg" % sn, "#") + self.sut_node.send_expect("rm -rf /lib/firmware/intel/ice/ddp/ice-%s.pkg" % sn, "#") if key == "true": - self.dut.send_expect( + self.sut_node.send_expect( "\cp /tmp/ice-%s.pkg /lib/firmware/intel/ice/ddp/ice-%s.pkg" % (pkg_ver, sn), "#", ) def tear_down(self): - self.dut_testpmd.quit() + self.sut_testpmd.quit() def tear_down_all(self): """ diff --git a/tests/TestSuite_ice_1pps_signal.py b/tests/TestSuite_ice_1pps_signal.py index feec0ef7..01708e74 100644 --- a/tests/TestSuite_ice_1pps_signal.py +++ b/tests/TestSuite_ice_1pps_signal.py @@ -18,13 +18,13 @@ class TestICE1PPS(TestCase): prerequisites. """ # Based on h/w type, chose how many ports to use - dut_ports = self.dut.get_ports(self.nic) - self.verify(len(dut_ports) >= 1, "Insufficient ports for testing") + sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(sut_ports) >= 1, "Insufficient ports for testing") # Verify that enough threads are available - self.cores = self.dut.get_core_list("1S/2C/1T") + self.cores = self.sut_node.get_core_list("1S/2C/1T") self.verify(self.cores, "Insufficient cores for speed testing") - self.pci = self.dut.ports_info[dut_ports[0]]["pci"] - self.pmd_output = PmdOutput(self.dut) + self.pci = self.sut_node.ports_info[sut_ports[0]]["pci"] + self.pmd_output = PmdOutput(self.sut_node) self.GLTSYN_AUX = re.compile(r"0x00000007\s+\(7\)") self.GLTSYN_CLKO = re.compile(r"0x1DCD6500\s+\(500000000\)") self.pattern = re.compile( @@ -122,7 +122,7 @@ class TestICE1PPS(TestCase): self.pmd_output.quit() def tear_down(self): - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_advanced_iavf_rss.py b/tests/TestSuite_ice_advanced_iavf_rss.py index f251418c..591a1596 100644 --- 
a/tests/TestSuite_ice_advanced_iavf_rss.py +++ b/tests/TestSuite_ice_advanced_iavf_rss.py @@ -5,8 +5,8 @@ import random import re -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from .rte_flow_common import FdirProcessing, RssProcessing, check_mark @@ -6001,19 +6001,19 @@ class AdvancedIavfRSSTest(TestCase): prerequisites. """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - - self.used_dut_port = self.dut_ports[0] - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + + self.used_sut_port = self.sut_ports[0] + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] self.vf_flag = False self.create_iavf() @@ -6021,15 +6021,15 @@ class AdvancedIavfRSSTest(TestCase): """ Run before each test case. 
""" - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.launch_testpmd() self.rxq = 16 self.rssprocess = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rssprocess.tester_ifaces) + "rssprocess.tg_ifaces: {}".format(self.rssprocess.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rssprocess.test_case)) @@ -6038,9 +6038,9 @@ class AdvancedIavfRSSTest(TestCase): def create_iavf(self): if self.vf_flag is False: - self.dut.bind_interfaces_linux("ice") - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.bind_interfaces_linux("ice") + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] self.vf_flag = True try: @@ -6048,8 +6048,8 @@ class AdvancedIavfRSSTest(TestCase): port.bind_driver(self.drivername) self.vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} - self.dut.send_expect("ifconfig %s up" % self.pf_interface, "# ") - self.dut.send_expect( + self.sut_node.send_expect("ifconfig %s up" % self.pf_interface, "# ") + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf_interface, vf0_mac), "# " ) except Exception as e: @@ -6058,7 +6058,7 @@ class AdvancedIavfRSSTest(TestCase): def destroy_iavf(self): if self.vf_flag is True: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) self.vf_flag = False def launch_testpmd(self): @@ -6220,7 +6220,7 @@ class AdvancedIavfRSSTest(TestCase): }, ] self.rssprocess.handle_tests(tests, 0) - self.dut.send_command("flow flush 0", timeout=1) + 
self.sut_node.send_command("flow flush 0", timeout=1) # Subcase 2: two rules with same pattern but different hash input set, hit default profile self.logger.info( @@ -6272,7 +6272,7 @@ class AdvancedIavfRSSTest(TestCase): }, ] self.rssprocess.handle_tests(tests, 0) - self.dut.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) # Subcase 3: two rules, scope smaller created first, and the larger one created later self.logger.info( @@ -6324,7 +6324,7 @@ class AdvancedIavfRSSTest(TestCase): }, ] self.rssprocess.handle_tests(tests, 0) - self.dut.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) # Subcase 4: two rules, scope larger created first, and the smaller one created later self.logger.info( @@ -6605,23 +6605,23 @@ class AdvancedIavfRSSTest(TestCase): def validate_packet_checksum(self, pkts, expect_pkts): expect_chksum = dict() checksum_pattern = re.compile("chksum.*=.*(0x[0-9a-z]+)") - self.tester.send_expect("scapy", ">>> ") - sniff_src = self.dut.get_mac_address(self.dut_ports[0]) + self.tg_node.send_expect("scapy", ">>> ") + sniff_src = self.sut_node.get_mac_address(self.sut_ports[0]) for pkt in expect_pkts: - self.tester.send_expect("p = %s" % expect_pkts[pkt], ">>>") - out = self.tester.send_expect("p.show2()", ">>>") + self.tg_node.send_expect("p = %s" % expect_pkts[pkt], ">>>") + out = self.tg_node.send_expect("p.show2()", ">>>") chksums = checksum_pattern.findall(out) expect_chksum[pkt] = chksums self.logger.info(expect_chksum) - self.tester.send_expect("exit()", "#") + self.tg_node.send_expect("exit()", "#") for pkt in pkts: - inst = self.tester.tcpdump_sniff_packets( - intf=self.tester_iface0, + inst = self.tg_node.tcpdump_sniff_packets( + intf=self.tg_iface0, count=len(pkts), filters=[{"layer": "ether", "config": {"src": vf0_mac}}], ) out = self.rssprocess.send_pkt_get_output(pkts=pkts[pkt]) - rece_pkt = self.tester.load_tcpdump_sniff_packets(inst) + rece_pkt = 
self.tg_node.load_tcpdump_sniff_packets(inst) rece_chksum = ( rece_pkt[0] .sprintf("%IP.chksum%;%TCP.chksum%;%UDP.chksum%;%SCTP.chksum%") @@ -6752,7 +6752,7 @@ class AdvancedIavfRSSTest(TestCase): def test_combined_case_with_fdir_queue_group(self): fdirprocess = FdirProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) hash_and_queue_list = [] queue_group = re.compile("end actions rss queues (\d+)\s(\d+)") @@ -6863,11 +6863,11 @@ class AdvancedIavfRSSTest(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") self.pmd_output.execute_cmd("quit", "#") def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_iavf() diff --git a/tests/TestSuite_ice_advanced_iavf_rss_gtpogre.py b/tests/TestSuite_ice_advanced_iavf_rss_gtpogre.py index dbfda746..3fa3346a 100644 --- a/tests/TestSuite_ice_advanced_iavf_rss_gtpogre.py +++ b/tests/TestSuite_ice_advanced_iavf_rss_gtpogre.py @@ -5,8 +5,8 @@ import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from .rte_flow_common import RssProcessing @@ -5692,43 +5692,43 @@ class TestICEAdvancedIAVFRSSGTPoGRE(TestCase): prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - self.pci0 = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pci1 = self.dut.ports_info[self.dut_ports[1]]["pci"] - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + self.pci0 = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pci1 = self.sut_node.ports_info[self.sut_ports[1]]["pci"] + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: self.vf_driver = "vfio-pci" - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port_0, 1, driver=self.kdriver + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port_0, 1, driver=self.kdriver ) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] 
- self.dut.send_expect( + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + self.sut_node.send_expect( "ip link set %s vf 0 mac 00:11:22:33:44:55" % self.pf0_intf, "#" ) self.vf0_pci = self.sriov_vfs_port[0].pci for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.rxq = 16 self.rssprocess = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rssprocess.tester_ifaces) + "rssprocess.tg_ifaces: {}".format(self.rssprocess.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rssprocess.test_case)) @@ -5750,7 +5750,7 @@ class TestICEAdvancedIAVFRSSGTPoGRE(TestCase): self.destroy_vf() def destroy_vf(self): - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) def launch_testpmd(self): # if support add --disable-rss diff --git a/tests/TestSuite_ice_advanced_iavf_rss_gtpu.py b/tests/TestSuite_ice_advanced_iavf_rss_gtpu.py index 040199f4..e7eb3200 100644 --- a/tests/TestSuite_ice_advanced_iavf_rss_gtpu.py +++ b/tests/TestSuite_ice_advanced_iavf_rss_gtpu.py @@ -6,8 +6,8 @@ import random import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from .rte_flow_common import RssProcessing @@ -8467,45 +8467,45 @@ class TestICEAdvancedIAVFRSSGTPU(TestCase): prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - self.pci0 = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pci1 = self.dut.ports_info[self.dut_ports[1]]["pci"] - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + self.pci0 = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pci1 = self.sut_node.ports_info[self.sut_ports[1]]["pci"] + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: self.vf_driver = "vfio-pci" - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port_0, 1, driver=self.kdriver + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port_0, 1, driver=self.kdriver ) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] 
- self.dut.send_expect( + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + self.sut_node.send_expect( "ip link set %s vf 0 mac 00:11:22:33:44:55" % self.pf0_intf, "#" ) self.vf0_pci = self.sriov_vfs_port[0].pci for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.launch_testpmd() self.symmetric = False self.rxq = 16 self.rssprocess = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rssprocess.tester_ifaces) + "rssprocess.tg_ifaces: {}".format(self.rssprocess.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rssprocess.test_case)) @@ -8516,9 +8516,9 @@ class TestICEAdvancedIAVFRSSGTPU(TestCase): self.pmd_output.execute_cmd("start") def destroy_vf(self): - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) time.sleep(2) - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[0]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[0]) def launch_testpmd(self, symmetric=False): if symmetric: @@ -9701,10 +9701,10 @@ class TestICEAdvancedIAVFRSSGTPU(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") def tear_down_all(self): self.destroy_vf() - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_advanced_iavf_rss_pppol2tpoudp.py b/tests/TestSuite_ice_advanced_iavf_rss_pppol2tpoudp.py index 33e3ddb8..dfe3cba4 100644 --- 
a/tests/TestSuite_ice_advanced_iavf_rss_pppol2tpoudp.py +++ b/tests/TestSuite_ice_advanced_iavf_rss_pppol2tpoudp.py @@ -6,8 +6,8 @@ import random import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from .rte_flow_common import RssProcessing @@ -3009,45 +3009,45 @@ class TestICEAdvancedIAVFRSSPPPoL2TPv2oUDP(TestCase): prerequisites. """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - self.pci0 = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pci1 = self.dut.ports_info[self.dut_ports[1]]["pci"] - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + self.pci0 = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pci1 = self.sut_node.ports_info[self.sut_ports[1]]["pci"] + self.pf0_intf = 
self.sut_node.ports_info[self.sut_ports[0]]["intf"] self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: self.vf_driver = "vfio-pci" - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port_0, 1, driver=self.kdriver + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port_0, 1, driver=self.kdriver ) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] - self.dut.send_expect( + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + self.sut_node.send_expect( "ip link set %s vf 0 mac 00:11:22:33:44:55" % self.pf0_intf, "#" ) self.vf0_pci = self.sriov_vfs_port[0].pci for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.launch_testpmd() self.symmetric = False self.rxq = 16 self.rssprocess = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rssprocess.tester_ifaces) + "rssprocess.tg_ifaces: {}".format(self.rssprocess.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rssprocess.test_case)) @@ -3058,9 +3058,9 @@ class TestICEAdvancedIAVFRSSPPPoL2TPv2oUDP(TestCase): self.pmd_output.execute_cmd("start") def destroy_vf(self): - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) time.sleep(2) - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[0]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[0]) def launch_testpmd(self, symmetric=False): param = "--disable-rss --rxq=16 --txq=16 --rxd=384 --txd=384" @@ -3620,10 +3620,10 @@ class TestICEAdvancedIAVFRSSPPPoL2TPv2oUDP(TestCase): def tear_down(self): # 
destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") def tear_down_all(self): self.destroy_vf() - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_advanced_iavf_rss_vlan_esp_ah_l2tp_pfcp.py b/tests/TestSuite_ice_advanced_iavf_rss_vlan_esp_ah_l2tp_pfcp.py index 0c24188a..85b2aabb 100644 --- a/tests/TestSuite_ice_advanced_iavf_rss_vlan_esp_ah_l2tp_pfcp.py +++ b/tests/TestSuite_ice_advanced_iavf_rss_vlan_esp_ah_l2tp_pfcp.py @@ -8,8 +8,8 @@ import string import time from framework.config import UserConf -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from .rte_flow_common import RssProcessing @@ -978,37 +978,37 @@ class ICE_advance_iavf_rss_vlan_ah_l2tp_pfcp(TestCase): """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - self.pci0 = 
self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pci1 = self.dut.ports_info[self.dut_ports[1]]["pci"] - - self.used_dut_port = self.dut_ports[0] - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + self.pci0 = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pci1 = self.sut_node.ports_info[self.sut_ports[1]]["pci"] + + self.used_sut_port = self.sut_ports[0] + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] self.vf_flag = False self.create_iavf() - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.symmetric = False self.rxq = 16 self.rsspro = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rsspro.tester_ifaces) + "rssprocess.tg_ifaces: {}".format(self.rsspro.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rsspro.test_case)) self.switch_testpmd(symmetric=self.symmetric) - self.dut_session = self.dut.new_session() + self.sut_session = self.sut_node.new_session() def set_up(self): """ @@ -1016,37 +1016,37 @@ class ICE_advance_iavf_rss_vlan_ah_l2tp_pfcp(TestCase): """ # check testpmd process status cmd = "ps -aux | grep testpmd | grep -v grep" - out = self.dut_session.send_expect(cmd, "#", 15) + out = self.sut_session.send_expect(cmd, "#", 15) if "testpmd" not in out: self.switch_testpmd(symmetric=False) if self.running_case == "test_unsupported_pattern_with_OS_default_package": - self.dut.kill_all() + 
self.sut_node.kill_all() self.switch_testpmd(symmetric=True) def create_iavf(self): if self.vf_flag is False: - self.dut.bind_interfaces_linux("ice") + self.sut_node.bind_interfaces_linux("ice") # get priv-flags default stats self.flag = "vf-vlan-pruning" - self.default_stats = self.dut.get_priv_flags_state( + self.default_stats = self.sut_node.get_priv_flags_state( self.pf_interface, self.flag ) if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s off" % (self.pf_interface, self.flag), "# ", ) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] self.vf_flag = True try: for port in self.sriov_vfs_port: port.bind_driver(self.drivername) self.vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} - self.dut.send_expect("ifconfig %s up" % self.pf_interface, "# ") - self.dut.send_expect( + self.sut_node.send_expect("ifconfig %s up" % self.pf_interface, "# ") + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf_interface, vf0_mac), "# " ) except Exception as e: @@ -1055,7 +1055,7 @@ class ICE_advance_iavf_rss_vlan_ah_l2tp_pfcp(TestCase): def destroy_iavf(self): if self.vf_flag is True: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) self.vf_flag = False def tear_down(self): @@ -1072,10 +1072,10 @@ class ICE_advance_iavf_rss_vlan_ah_l2tp_pfcp(TestCase): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_iavf() if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.pf_interface, self.flag, self.default_stats), "# ", @@ -1099,7 +1099,7 @@ class ICE_advance_iavf_rss_vlan_ah_l2tp_pfcp(TestCase): self.verify(res is True, "there have port link is down") def switch_testpmd(self, symmetric=False): - self.dut.kill_all() + self.sut_node.kill_all() self.launch_testpmd(symmetric) self.pmd_output.execute_cmd("start") @@ -1166,11 +1166,11 @@ class ICE_advance_iavf_rss_vlan_ah_l2tp_pfcp(TestCase): def test_delete_nonexisting_rule(self): self.rsspro.check_rule(stats=False) - out = self.dut.send_command("flow destroy 0 rule 0", timeout=1) + out = self.sut_node.send_command("flow destroy 0 rule 0", timeout=1) self.verify( "error" not in out, "delete nonexisting rule raise err,expected no err" ) - self.dut.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) @skip_unsupported_pkg(["comms", "wireless"]) def test_unsupported_pattern_with_OS_default_package(self): @@ -1182,7 +1182,7 @@ class ICE_advance_iavf_rss_vlan_ah_l2tp_pfcp(TestCase): ] self.rsspro.create_rule(rule_list, check_stats=False, msg="Invalid argument") self.rsspro.check_rule(stats=False) - self.dut.kill_all() + self.sut_node.kill_all() self.switch_testpmd(symmetric=False) def test_invalid_port(self): @@ -1190,7 +1190,7 @@ class ICE_advance_iavf_rss_vlan_ah_l2tp_pfcp(TestCase): self.rsspro.create_rule(rule, check_stats=False, msg="No such device") self.rsspro.check_rule(stats=False, rule_list=[rule]) pattern = "Invalid port 1" - out = self.dut.send_command("flow list 1", timeout=1) + out = self.sut_node.send_command("flow list 1", timeout=1) result = re.search(r"%s" % pattern, out) self.verify( result, diff --git a/tests/TestSuite_ice_advanced_rss.py b/tests/TestSuite_ice_advanced_rss.py index 530a7f69..87dd97cc 100644 --- 
a/tests/TestSuite_ice_advanced_rss.py +++ b/tests/TestSuite_ice_advanced_rss.py @@ -5,8 +5,8 @@ import random import re -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from .rte_flow_common import FdirProcessing, RssProcessing, check_mark @@ -5179,31 +5179,31 @@ class AdvancedRSSTest(TestCase): prerequisites. """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - self.pci0 = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pci1 = self.dut.ports_info[self.dut_ports[1]]["pci"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + self.pci0 = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pci1 = self.sut_node.ports_info[self.sut_ports[1]]["pci"] self.pass_flag = "passed" self.fail_flag = "failed" - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + 
self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.package_version = self.launch_testpmd() self.symmetric = False self.rxq = 64 self.rssprocess = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rssprocess.tester_ifaces) + "rssprocess.tg_ifaces: {}".format(self.rssprocess.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rssprocess.test_case)) @@ -5744,23 +5744,23 @@ class AdvancedRSSTest(TestCase): def validate_packet_checksum(self, pkts, expect_pkts): expect_chksum = dict() checksum_pattern = re.compile("chksum.*=.*(0x[0-9a-z]+)") - self.tester.send_expect("scapy", ">>> ") - sniff_src = self.dut.get_mac_address(self.dut_ports[0]) + self.tg_node.send_expect("scapy", ">>> ") + sniff_src = self.sut_node.get_mac_address(self.sut_ports[0]) for pkt in expect_pkts: - self.tester.send_expect("p = %s" % expect_pkts[pkt], ">>>") - out = self.tester.send_expect("p.show2()", ">>>") + self.tg_node.send_expect("p = %s" % expect_pkts[pkt], ">>>") + out = self.tg_node.send_expect("p.show2()", ">>>") chksums = checksum_pattern.findall(out) expect_chksum[pkt] = chksums self.logger.info(expect_chksum) - self.tester.send_expect("exit()", "#") + self.tg_node.send_expect("exit()", "#") for pkt in pkts: - inst = self.tester.tcpdump_sniff_packets( - intf=self.tester_iface0, + inst = self.tg_node.tcpdump_sniff_packets( + intf=self.tg_iface0, count=len(pkts), filters=[{"layer": "ether", "config": {"src": sniff_src}}], ) out = self.rssprocess.send_pkt_get_output(pkts=pkts[pkt]) - rece_pkt = self.tester.load_tcpdump_sniff_packets(inst) + rece_pkt = self.tg_node.load_tcpdump_sniff_packets(inst) rece_chksum = ( rece_pkt[0] .sprintf("%IP.chksum%;%TCP.chksum%;%UDP.chksum%;%SCTP.chksum%") @@ -5891,7 +5891,7 @@ class AdvancedRSSTest(TestCase): def 
test_combined_case_with_fdir_queue_group(self): fdirprocess = FdirProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) hash_and_queue_list = [] queue_group = re.compile("end actions rss queues (\d+)\s(\d+)") @@ -5983,9 +5983,9 @@ class AdvancedRSSTest(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_advanced_rss_gtpogre.py b/tests/TestSuite_ice_advanced_rss_gtpogre.py index 931b546c..dc627602 100644 --- a/tests/TestSuite_ice_advanced_rss_gtpogre.py +++ b/tests/TestSuite_ice_advanced_rss_gtpogre.py @@ -5,8 +5,8 @@ import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from .rte_flow_common import RssProcessing @@ -3800,31 +3800,31 @@ class TestICEAdvancedRSSGTPoGRE(TestCase): prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - self.pci0 = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pci1 = self.dut.ports_info[self.dut_ports[1]]["pci"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + self.pci0 = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pci1 = self.sut_node.ports_info[self.sut_ports[1]]["pci"] self.pass_flag = "passed" self.fail_flag = "failed" - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.launch_testpmd() self.enable_rss = False self.rxq = 64 self.rssprocess = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rssprocess.tester_ifaces) + "rssprocess.tg_ifaces: 
{}".format(self.rssprocess.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rssprocess.test_case)) @@ -4335,9 +4335,9 @@ class TestICEAdvancedRSSGTPoGRE(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_advanced_rss_gtpu.py b/tests/TestSuite_ice_advanced_rss_gtpu.py index 34f07c9c..a82c55c0 100644 --- a/tests/TestSuite_ice_advanced_rss_gtpu.py +++ b/tests/TestSuite_ice_advanced_rss_gtpu.py @@ -5,8 +5,8 @@ import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from .rte_flow_common import RssProcessing @@ -5069,31 +5069,31 @@ class TestICEAdvancedRSSGTPU(TestCase): prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - self.pci0 = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pci1 = self.dut.ports_info[self.dut_ports[1]]["pci"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + self.pci0 = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pci1 = self.sut_node.ports_info[self.sut_ports[1]]["pci"] self.pass_flag = "passed" self.fail_flag = "failed" - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.launch_testpmd() self.enable_rss = False self.rxq = 64 self.rssprocess = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rssprocess.tester_ifaces) + "rssprocess.tg_ifaces: 
{}".format(self.rssprocess.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rssprocess.test_case)) @@ -6425,9 +6425,9 @@ class TestICEAdvancedRSSGTPU(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_advanced_rss_pppoe.py b/tests/TestSuite_ice_advanced_rss_pppoe.py index a32f2e79..8d2fa185 100644 --- a/tests/TestSuite_ice_advanced_rss_pppoe.py +++ b/tests/TestSuite_ice_advanced_rss_pppoe.py @@ -6,8 +6,8 @@ import random import re import string -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, skip_unsupported_pkg from .rte_flow_common import RssProcessing @@ -5390,28 +5390,28 @@ class Advanced_rss_pppoe(TestCase): Generic filter Prerequistites """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = 
self.tester.get_interface(self.tester_port1) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) self.pci_list = [] - for port in self.dut.ports_info: + for port in self.sut_node.ports_info: self.pci_list.append(port["pci"]) - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.symmetric = False self.rxq = 64 self.rsspro = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rsspro.tester_ifaces) + "rssprocess.tg_ifaces: {}".format(self.rsspro.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rsspro.test_case)) @@ -5426,16 +5426,16 @@ class Advanced_rss_pppoe(TestCase): Run after each test case. """ # destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() def launch_testpmd(self, symmetric=False): if symmetric: @@ -5458,7 +5458,7 @@ class Advanced_rss_pppoe(TestCase): self.verify(res is True, "there have port link is down") def switch_testpmd(self, symmetric=True): - self.dut.kill_all() + self.sut_node.kill_all() self.launch_testpmd(symmetric) self.pmd_output.execute_cmd("start") diff --git a/tests/TestSuite_ice_advanced_rss_vlan_esp_ah_l2tp_pfcp.py b/tests/TestSuite_ice_advanced_rss_vlan_esp_ah_l2tp_pfcp.py index dd610bc4..c74aae51 100644 --- a/tests/TestSuite_ice_advanced_rss_vlan_esp_ah_l2tp_pfcp.py +++ b/tests/TestSuite_ice_advanced_rss_vlan_esp_ah_l2tp_pfcp.py @@ -7,8 +7,8 @@ import re import string from framework.config import UserConf -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from .rte_flow_common import RssProcessing @@ -842,28 +842,28 @@ class Advanced_rss_vlan_ah_l2tp_pfcp(TestCase): """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) + self.ports_socket = 
self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) self.pci_list = [] - for port in self.dut.ports_info: + for port in self.sut_node.ports_info: self.pci_list.append(port["pci"]) - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.symmetric = False self.rxq = 64 self.rsspro = RssProcessing( - self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmd_output, [self.tg_iface0, self.tg_iface1], self.rxq ) self.logger.info( - "rssprocess.tester_ifaces: {}".format(self.rsspro.tester_ifaces) + "rssprocess.tg_ifaces: {}".format(self.rsspro.tg_ifaces) ) self.logger.info("rssprocess.test_case: {}".format(self.rsspro.test_case)) @@ -878,16 +878,16 @@ class Advanced_rss_vlan_ah_l2tp_pfcp(TestCase): Run after each test case. """ # destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() def launch_testpmd(self, symmetric=False): if symmetric: @@ -910,7 +910,7 @@ class Advanced_rss_vlan_ah_l2tp_pfcp(TestCase): self.verify(res is True, "there have port link is down") def switch_testpmd(self, symmetric=True): - self.dut.kill_all() + self.sut_node.kill_all() self.launch_testpmd(symmetric) self.pmd_output.execute_cmd("start") @@ -991,11 +991,11 @@ class Advanced_rss_vlan_ah_l2tp_pfcp(TestCase): def test_delete_nonexisting_rule(self): self.switch_testpmd(symmetric=True) self.rsspro.check_rule(stats=False) - out = self.dut.send_command("flow destroy 0 rule 0", timeout=1) + out = self.sut_node.send_command("flow destroy 0 rule 0", timeout=1) self.verify( "error" not in out, "delete nonexisting rule raise err,expected no err" ) - self.dut.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) @skip_unsupported_pkg(["comms", "wireless"]) def test_unsupported_pattern_with_OS_default_package(self): @@ -1018,7 +1018,7 @@ class Advanced_rss_vlan_ah_l2tp_pfcp(TestCase): self.rsspro.create_rule(rule, check_stats=False, msg="No such device") self.rsspro.check_rule(stats=False, rule_list=[rule]) pattern = "Invalid port 1" - out = self.dut.send_command("flow list 1", timeout=1) + out = self.sut_node.send_command("flow list 1", timeout=1) result = re.search(r"%s" % pattern, out) self.verify( result, diff --git a/tests/TestSuite_ice_dcf_acl_filter.py b/tests/TestSuite_ice_dcf_acl_filter.py index 87aec71a..fc74f1d9 100644 --- a/tests/TestSuite_ice_dcf_acl_filter.py +++ b/tests/TestSuite_ice_dcf_acl_filter.py @@ -9,8 +9,8 @@ import re import time import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.utils import BLUE, GREEN, RED @@ -870,7 +870,7 @@ class ICEDCFACLFilterTest(TestCase): # modprobe vfio 
driver if driver == "vfio-pci": for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "vfio-pci": netdev.bind_driver(driver="vfio-pci") @@ -878,13 +878,13 @@ class ICEDCFACLFilterTest(TestCase): elif driver == "igb_uio": # igb_uio should insmod as default, no need to check for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "igb_uio": netdev.bind_driver(driver="igb_uio") else: for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver_now = netdev.get_nic_driver() if driver == "": driver = netdev.default_driver @@ -899,43 +899,43 @@ class ICEDCFACLFilterTest(TestCase): self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"], "nic is not Intel® Ethernet 800 Series", ) - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - localPort0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_iface0 = self.tester.get_interface(localPort0) - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf1_intf = self.dut.ports_info[self.dut_ports[1]]["intf"] - self.dut.send_expect("ifconfig %s up" % self.tester_iface0, "# ") - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + localPort0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_iface0 = self.tg_node.get_interface(localPort0) + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf1_intf = self.sut_node.ports_info[self.sut_ports[1]]["intf"] + self.sut_node.send_expect("ifconfig %s up" % self.tg_iface0, "# ") + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = 
PmdOutput(self.sut_node) self.testpmd_status = "close" # bind pf to kernel - self.bind_nics_driver(self.dut_ports, driver="ice") + self.bind_nics_driver(self.sut_ports, driver="ice") # set vf driver self.vf_driver = "vfio-pci" - self.dut.send_expect("modprobe uio", "# ") - self.path = self.dut.apps_name["test-pmd"] + self.sut_node.send_expect("modprobe uio", "# ") + self.path = self.sut_node.apps_name["test-pmd"] self.setup_1pf_vfs_env() - self.dut.send_expect("ifconfig %s up" % self.tester_iface0, "# ", 15) + self.sut_node.send_expect("ifconfig %s up" % self.tg_iface0, "# ", 15) self.src_file_dir = "dep/" - self.dut_file_dir = "/tmp/" + self.sut_file_dir = "/tmp/" def setup_1pf_vfs_env(self, pf_port=0, driver="default"): - self.used_dut_port_0 = self.dut_ports[pf_port] + self.used_sut_port_0 = self.sut_ports[pf_port] # get PF interface name - out = self.dut.send_expect("ethtool -i %s" % self.pf0_intf, "#") + out = self.sut_node.send_expect("ethtool -i %s" % self.pf0_intf, "#") # generate 4 VFs on PF - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 4, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 4, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] self.vf0_pci = self.sriov_vfs_port_0[0].pci self.vf1_pci = self.sriov_vfs_port_0[1].pci # set VF0 as trust - self.dut.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") + self.sut_node.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") # set VF1 mac address - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 1 mac %s" % (self.pf0_intf, vf1_mac), "# " ) # bind VFs to dpdk driver @@ -953,7 +953,7 @@ class ICEDCFACLFilterTest(TestCase): Create testpmd command """ # Prepare testpmd EAL and parameters - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( 
cores="1S/4C/1T", ports=[self.vf0_pci, self.vf1_pci], port_options={self.vf0_pci: "cap=dcf"}, @@ -967,18 +967,18 @@ class ICEDCFACLFilterTest(TestCase): """ time.sleep(5) command = self.create_testpmd_command(param) - out = self.dut.send_expect(command, "testpmd> ", 20) + out = self.sut_node.send_expect(command, "testpmd> ", 20) self.testpmd_status = "running" - self.dut.send_expect("set portlist 1", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) self.pmd_output.execute_cmd("start") return out def send_packets(self, packets): - self.pkt.update_pkt(packets) - tx_port = self.tester_iface0 - self.pkt.send_pkt(crb=self.tester, tx_port=tx_port) + self.scapy_pkt_builder.update_pkt(packets) + tx_port = self.tg_iface0 + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_port) def send_pkts_getouput(self, pkts): """ @@ -987,7 +987,7 @@ class ICEDCFACLFilterTest(TestCase): """ self.send_packets(pkts) time.sleep(1) - out_info = self.dut.get_session_output(timeout=1) + out_info = self.sut_node.get_session_output(timeout=1) out_pkt = self.pmd_output.execute_cmd("stop", timeout=15) out = out_info + out_pkt self.pmd_output.execute_cmd("start") @@ -998,7 +998,7 @@ class ICEDCFACLFilterTest(TestCase): create acl filter rules """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) created") rule_list = [] acl_rule = "Succeeded to create (4) flow" @@ -1038,7 +1038,7 @@ class ICEDCFACLFilterTest(TestCase): create switch or fdir filter rules """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) created") rule_list = [] if isinstance(rules, list): @@ -1077,7 +1077,7 @@ class 
ICEDCFACLFilterTest(TestCase): check the rules in list identical to ones in rule_list """ if session_name == "": - session_name = self.dut + session_name = self.sut_node out = session_name.send_expect("flow list %d" % port_id, "testpmd> ", 15) p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule") m = p.search(out) @@ -1101,7 +1101,7 @@ class ICEDCFACLFilterTest(TestCase): self, port_id, rule_list, session_name="", need_verify=True ): if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) destroyed") destroy_list = [] if isinstance(rule_list, list): @@ -1142,7 +1142,7 @@ class ICEDCFACLFilterTest(TestCase): test_results = {} for test_vector in vectors: try: - self.dut.send_expect("flow flush 0", "testpmd> ", 120) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 120) # create a rule rule_list = self.create_acl_filter_rule(test_vector["rules"]) @@ -1250,7 +1250,7 @@ class ICEDCFACLFilterTest(TestCase): self.check_filter_rule_list(0, []) # full mask rules are created as switch rules self.create_acl_filter_rule(rules["full mask"], check_stats=False) - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) # inconsistent spec and mask rules rule_list1 = self.create_acl_filter_rule( rules["inconsistent spec and mask"], check_stats=True @@ -1265,7 +1265,7 @@ class ICEDCFACLFilterTest(TestCase): rfc.check_drop( out1, pkt_num=2, check_param={"port_id": 1, "drop": 1}, stats=True ) - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) # acl rules combined "0" mask and not "0" mask rule_list2 = self.create_acl_filter_rule(rules["acl rules"], check_stats=True) self.check_filter_rule_list(0, rule_list2) @@ -1303,13 +1303,13 @@ class ICEDCFACLFilterTest(TestCase): count = count + 1 flows.close() self.verify(count == 160, "failed to config 160 acl rules.") - 
self.dut.session.copy_file_to(self.src_file_dir + src_file, self.dut_file_dir) + self.sut_node.session.copy_file_to(self.src_file_dir + src_file, self.sut_file_dir) # start testpmd with creating 512 ACL rules - param = " --cmdline-file=%s" % (self.dut_file_dir + src_file) + param = " --cmdline-file=%s" % (self.sut_file_dir + src_file) out_testpmd = self.launch_testpmd(param) self.check_dcf_status(out_testpmd, stats=True) - rule_list = self.dut.send_expect("flow list 0", "testpmd> ", 15) + rule_list = self.sut_node.send_expect("flow list 0", "testpmd> ", 15) self.verify("159" in rule_list, "160 rules failed to be created") # create one more ACl rule failed, it is created as a switch rule. @@ -1317,12 +1317,12 @@ class ICEDCFACLFilterTest(TestCase): self.create_acl_filter_rule(rule, check_stats=False) # delete one ACL rule, create the rule again, it is created as an ACL rule successfully. - self.dut.send_expect("flow destroy 0 rule 159", "testpmd> ", 15) - rule_list = self.dut.send_expect("flow list 0", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 159", "testpmd> ", 15) + rule_list = self.sut_node.send_expect("flow list 0", "testpmd> ", 15) self.verify("159" not in rule_list, "rule 159 is not deleted") self.create_acl_filter_rule(rule, check_stats=True) # delete the switch rule - self.dut.send_expect("flow destroy 0 rule 160", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 160", "testpmd> ", 15) # send and check match packets packet = ( 'Ether(src="00:11:22:33:44:55", dst="%s")/IP(src="192.168.2.255", dst="192.168.0.2")/TCP(sport=22, dport=23)/Raw(load="x"*30)' @@ -1334,7 +1334,7 @@ class ICEDCFACLFilterTest(TestCase): ) # delete all rules, send and check match packets - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) out1 = self.send_pkts_getouput(pkts=packet) rfc.check_drop( out1, pkt_num=1, check_param={"port_id": 1, "drop": 1}, stats=False @@ -1355,13 
+1355,13 @@ class ICEDCFACLFilterTest(TestCase): count = count + 1 flows.close() self.verify(count == 255, "failed to config 255 acl rules.") - self.dut.session.copy_file_to(self.src_file_dir + src_file, self.dut_file_dir) + self.sut_node.session.copy_file_to(self.src_file_dir + src_file, self.sut_file_dir) # start testpmd with creating 255 ACL rules - param = " --cmdline-file=%s" % (self.dut_file_dir + src_file) + param = " --cmdline-file=%s" % (self.sut_file_dir + src_file) out_testpmd = self.launch_testpmd(param) self.check_dcf_status(out_testpmd, stats=True) - rule_list = self.dut.send_expect("flow list 0", "testpmd> ", 15) + rule_list = self.sut_node.send_expect("flow list 0", "testpmd> ", 15) self.verify("254" in rule_list, "255 rules failed to be created") # create a switch rule @@ -1371,7 +1371,7 @@ class ICEDCFACLFilterTest(TestCase): # create the 256 ACl rule rule1 = "flow create 0 ingress pattern eth / ipv4 src spec 192.168.2.255 src mask 255.0.255.255 / tcp / end actions drop / end" self.create_acl_filter_rule(rule1, check_stats=True) - rule_list = self.dut.send_expect("flow list 0", "testpmd> ", 15) + rule_list = self.sut_node.send_expect("flow list 0", "testpmd> ", 15) self.verify("256" in rule_list, "the last ACL rule failed to be created") # send and check match packets @@ -1398,9 +1398,9 @@ class ICEDCFACLFilterTest(TestCase): ) # delete one rule, create the rule again, successfully. 
- self.dut.send_expect("flow destroy 0 rule 256", "testpmd> ", 15) - self.dut.send_expect("flow destroy 0 rule 257", "testpmd> ", 15) - rule_list = self.dut.send_expect("flow list 0", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 256", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 257", "testpmd> ", 15) + rule_list = self.sut_node.send_expect("flow list 0", "testpmd> ", 15) self.verify("256" not in rule_list, "rule 256 is not deleted") self.verify("257" not in rule_list, "rule 257 is not deleted") self.create_acl_filter_rule(rule2, check_stats=True) @@ -1411,7 +1411,7 @@ class ICEDCFACLFilterTest(TestCase): ) # delete all rules, send and check match packets - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) out4 = self.send_pkts_getouput(pkts=packet2) rfc.check_drop( out4, pkt_num=1, check_param={"port_id": 1, "drop": 1}, stats=False @@ -1432,13 +1432,13 @@ class ICEDCFACLFilterTest(TestCase): count = count + 1 flows.close() self.verify(count == 63, "failed to config 63 acl rules.") - self.dut.session.copy_file_to(self.src_file_dir + src_file, self.dut_file_dir) + self.sut_node.session.copy_file_to(self.src_file_dir + src_file, self.sut_file_dir) # start testpmd with creating 64 ACL rules - param = " --cmdline-file=%s" % (self.dut_file_dir + src_file) + param = " --cmdline-file=%s" % (self.sut_file_dir + src_file) out_testpmd = self.launch_testpmd(param) self.check_dcf_status(out_testpmd, stats=True) - rule_list = self.dut.send_expect("flow list 0", "testpmd> ", 15) + rule_list = self.sut_node.send_expect("flow list 0", "testpmd> ", 15) self.verify("62" in rule_list, "63 rules failed to be created") # create one switch rule @@ -1454,13 +1454,13 @@ class ICEDCFACLFilterTest(TestCase): self.create_acl_filter_rule(rule2, check_stats=False) # delete one rule, create the rule again, successfully. 
- self.dut.send_expect("flow destroy 0 rule 64", "testpmd> ", 15) - rule_list = self.dut.send_expect("flow list 0", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 64", "testpmd> ", 15) + rule_list = self.sut_node.send_expect("flow list 0", "testpmd> ", 15) self.verify("64" not in rule_list, "rule 64 is not deleted") self.create_acl_filter_rule(rule2, check_stats=True) # delete switch rule - self.dut.send_expect("flow destroy 0 rule 65", "testpmd> ", 15) - rule_list = self.dut.send_expect("flow list 0", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 65", "testpmd> ", 15) + rule_list = self.sut_node.send_expect("flow list 0", "testpmd> ", 15) self.verify("65" not in rule_list, "rule 65 is not deleted") # send and check match packets @@ -1474,7 +1474,7 @@ class ICEDCFACLFilterTest(TestCase): ) # delete all rules, send and check match packets - self.dut.send_expect("flow flush 0", "testpmd> ", 60) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 60) out1 = self.send_pkts_getouput(pkts=packet) rfc.check_drop( out1, pkt_num=1, check_param={"port_id": 1, "drop": 1}, stats=False @@ -1728,7 +1728,7 @@ class ICEDCFACLFilterTest(TestCase): out_noreceived, pkt_num=0, check_param={"port_id": 1, "passthru": 1} ) - self.dut.send_expect("flow destroy 0 rule 4", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 4", "testpmd> ", 15) packets = { "mark": 'Ether(src="00:11:22:33:44:55", dst="00:01:23:45:67:88")/IP(src="192.168.0.2", dst="192.168.0.20")/TCP(sport=22,dport=23)/Raw(load="x"*30)', @@ -1754,14 +1754,14 @@ class ICEDCFACLFilterTest(TestCase): "Failed to init DCF parent adapter" not in out_testpmd, "request for DCF is rejected.", ) - out_portinfo = self.dut.send_expect("show port info 0", "testpmd> ", 15) + out_portinfo = self.sut_node.send_expect("show port info 0", "testpmd> ", 15) self.verify("net_ice_dcf" in out_portinfo, "request for DCF is rejected.") else: self.verify( "Failed to init DCF parent adapter" in 
out_testpmd, "request for DCF is accepted.", ) - out_portinfo = self.dut.send_expect("show port info 0", "testpmd> ", 15) + out_portinfo = self.sut_node.send_expect("show port info 0", "testpmd> ", 15) self.verify( "net_ice_dcf" not in out_portinfo, "request for DCF is accepted." ) @@ -1772,19 +1772,19 @@ class ICEDCFACLFilterTest(TestCase): """ if self.testpmd_status != "close": # destroy all flow rules on DCF - self.dut.send_expect("flow flush 0", "testpmd> ", 15) - self.dut.send_expect("clear port stats all", "testpmd> ", 15) - self.dut.send_expect("quit", "#", 30) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 15) + self.sut_node.send_expect("quit", "#", 30) # kill all DPDK application - self.dut.kill_all() + self.sut_node.kill_all() self.testpmd_status = "close" def test_mutually_exclusive(self): """ DCF mode and any ACL filters (not added by DCF) shall be mutually exclusive """ - self.dut.kill_all() - self.session_secondary = self.dut.new_session() + self.sut_node.kill_all() + self.session_secondary = self.sut_node.new_session() # add ACL rule by kernel, reject request for DCF functionality self.add_acl_rule_not_by_dcf(self.pf0_intf, stats=True) @@ -1841,22 +1841,22 @@ class ICEDCFACLFilterTest(TestCase): """ delete all the acl rule added not by DCF """ - out_pf0 = self.dut.send_expect("ethtool -n %s" % (self.pf0_intf), "# ") - out_pf1 = self.dut.send_expect("ethtool -n %s" % (self.pf1_intf), "# ") + out_pf0 = self.sut_node.send_expect("ethtool -n %s" % (self.pf0_intf), "# ") + out_pf1 = self.sut_node.send_expect("ethtool -n %s" % (self.pf1_intf), "# ") p = re.compile(r"Filter: (\d+)") m0 = p.search(out_pf0) m1 = p.search(out_pf1) if m0: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool -N %s delete %d" % (self.pf0_intf, int(m0.group(1))), "# " ) - self.dut.send_expect("ethtool -n %s" % (self.pf0_intf), "Total 0 rules") + self.sut_node.send_expect("ethtool -n %s" % 
(self.pf0_intf), "Total 0 rules") if m1: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool -N %s delete %d" % (self.pf1_intf, int(m1.group(1))), "# " ) - self.dut.send_expect("ethtool -n %s" % (self.pf1_intf), "Total 0 rules") + self.sut_node.send_expect("ethtool -n %s" % (self.pf1_intf), "Total 0 rules") def tear_down(self): """ @@ -1864,11 +1864,11 @@ class ICEDCFACLFilterTest(TestCase): """ self.quit_testpmd() if getattr(self, "session_secondary", None): - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) self.delete_acl_rule_not_added_by_dcf() def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_dcf_date_path.py b/tests/TestSuite_ice_dcf_date_path.py index 372f8ca1..15559723 100644 --- a/tests/TestSuite_ice_dcf_date_path.py +++ b/tests/TestSuite_ice_dcf_date_path.py @@ -5,8 +5,8 @@ import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -15,24 +15,24 @@ class DcfDatePathTest(TestCase): wrong_mac = "68:05:CA:8D:ED:A8" def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.used_dut_port = self.dut_ports[0] - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.used_sut_port = self.sut_ports[0] + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.vf_driver = self.get_suite_cfg()["vf_driver"] - self.dut_intf0 = self.dut.ports_info[self.used_dut_port]["intf"] - self.tester_intf0 = self.tester.get_interface(self.tester.get_local_port(0)) + self.sut_intf0 = self.sut_node.ports_info[self.used_sut_port]["intf"] + self.tg_intf0 = self.tg_node.get_interface(self.tg_node.get_local_port(0)) # Generate 1 trust VF on 1 PF, and request 1 DCF on the trust VF - 
self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 4, self.kdriver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.dut_ports[0]]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 4, self.kdriver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.sut_ports[0]]["vfs_port"] self.used_vf_pci = self.sriov_vfs_port_0[0].pci # config vf trust on and vf mac value - self.dut.send_expect("ip link set %s vf 0 trust on" % self.dut_intf0, "#") - self.dut.send_expect( - "ip link set {} vf 0 mac {}".format(self.dut_intf0, self.vf_mac), "#" + self.sut_node.send_expect("ip link set %s vf 0 trust on" % self.sut_intf0, "#") + self.sut_node.send_expect( + "ip link set {} vf 0 mac {}".format(self.sut_intf0, self.vf_mac), "#" ) self.sriov_vfs_port_0[0].bind_driver(self.vf_driver) - self.pmd_output = PmdOutput(self.dut) - self.pkt = Packet() + self.pmd_output = PmdOutput(self.sut_node) + self.scapy_pkt_builder = ScapyPacketBuilder() def set_up(self): self.pmd_output.start_testpmd( @@ -44,8 +44,8 @@ class DcfDatePathTest(TestCase): ) def send_packets(self, packets, tx_port, count=1): - self.pkt.update_pkt(packets) - self.pkt.send_pkt(crb=self.tester, tx_port=tx_port, count=count) + self.scapy_pkt_builder.update_pkt(packets) + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_port, count=count) def send_pkts_getouput(self, pkts, tx_port, count=1, status=False): # Get the DCF package information @@ -66,13 +66,13 @@ class DcfDatePathTest(TestCase): """ self.pmd_output.execute_cmd("set fwd mac") self.pmd_output.execute_cmd("start") - inst = self.tester.tcpdump_sniff_packets(self.tester_intf0) + inst = self.tg_node.tcpdump_sniff_packets(self.tg_intf0) pkts_cmd = 'Ether(dst="{}", src="00:11:22:33:44:55")/IP(src="192.168.1.1",dst="192.168.1.3")/Raw("x"*64)'.format( self.vf_mac ) - self.send_packets(pkts_cmd, self.tester_intf0, count=100) + self.send_packets(pkts_cmd, self.tg_intf0, count=100) time.sleep(2) - p = 
self.tester.load_tcpdump_sniff_packets(inst) + p = self.tg_node.load_tcpdump_sniff_packets(inst) self.verify( len(p) == 100, "send 100 packets received %d packets, not match" % len(p) ) @@ -129,22 +129,22 @@ class DcfDatePathTest(TestCase): self.pmd_output.execute_cmd("set fwd rxonly") self.pmd_output.execute_cmd("set verbose 1") self.pmd_output.execute_cmd("start") - out1 = self.send_pkts_getouput(pkt_list1, self.tester_intf0) + out1 = self.send_pkts_getouput(pkt_list1, self.tg_intf0) self.verify( out1[0] == out1[3] and out1[0] != out1[1] != out1[2], "ipv4 rss hash value test failed", ) - out2 = self.send_pkts_getouput(pkt_list2, self.tester_intf0) + out2 = self.send_pkts_getouput(pkt_list2, self.tg_intf0) self.verify( out2[0] == out2[3] and out2[0] != out2[1] != out2[2], "ipv6 rss hash value test failed", ) - out3 = self.send_pkts_getouput(pkt_list3, self.tester_intf0) + out3 = self.send_pkts_getouput(pkt_list3, self.tg_intf0) self.verify( out3[0] == out3[3] == out3[4] and out3[0] != out3[1] != out3[2], "inner ipv4 rss hash value test failed", ) - out4 = self.send_pkts_getouput(pkt_list4, self.tester_intf0) + out4 = self.send_pkts_getouput(pkt_list4, self.tg_intf0) self.verify( out4[0] == out4[3] == out4[4] and out4[0] != out4[1] != out4[2], "inner ipv6 rss hash value test failed", @@ -218,23 +218,23 @@ class DcfDatePathTest(TestCase): self.pmd_output.execute_cmd("set fwd rxonly") self.pmd_output.execute_cmd("set verbose 1") self.pmd_output.execute_cmd("start") - out1 = self.send_pkts_getouput(pkt_list1, self.tester_intf0) + out1 = self.send_pkts_getouput(pkt_list1, self.tg_intf0) self.verify( out1[0] == out1[5] and out1[0] != out1[1] != out1[2] != out1[3] != out1[4], "ipv4 rss hash value test failed", ) - out2 = self.send_pkts_getouput(pkt_list2, self.tester_intf0) + out2 = self.send_pkts_getouput(pkt_list2, self.tg_intf0) self.verify( out2[0] == out2[5] and out2[0] != out2[1] != out2[2] != out2[3] != out2[4], "ipv6 rss hash value test failed", ) - out3 = 
self.send_pkts_getouput(pkt_list3, self.tester_intf0) + out3 = self.send_pkts_getouput(pkt_list3, self.tg_intf0) self.verify( out3[0] == out3[6] and out3[0] != out3[1] != out3[2] != out3[3] != out3[4] != out3[5], "inner ipv4 rss hash value test failed", ) - out4 = self.send_pkts_getouput(pkt_list4, self.tester_intf0) + out4 = self.send_pkts_getouput(pkt_list4, self.tg_intf0) self.verify( out4[0] == out4[6] and out4[0] != out4[1] != out4[2] != out4[3] != out4[4] != out4[5], @@ -257,18 +257,18 @@ class DcfDatePathTest(TestCase): self.pmd_output.execute_cmd("set fwd rxonly") self.pmd_output.execute_cmd("set verbose 1") self.pmd_output.execute_cmd("start") - out = self.send_pkts_getouput(pkt, self.tester_intf0, status=True) + out = self.send_pkts_getouput(pkt, self.tg_intf0, status=True) self.verify(self.wrong_mac not in out, "The wrong mac packet was received") self.pmd_output.execute_cmd(rule, "created") - out = self.send_pkts_getouput(pkt, self.tester_intf0, status=True) + out = self.send_pkts_getouput(pkt, self.tg_intf0, status=True) self.verify(self.wrong_mac in out, "The wrong mac packet not received") self.pmd_output.execute_cmd("flow destroy 0 rule 0", "destroyed") - out = self.send_pkts_getouput(pkt, self.tester_intf0, status=True) + out = self.send_pkts_getouput(pkt, self.tg_intf0, status=True) self.verify(self.wrong_mac not in out, "The wrong mac packet was received") def tear_down(self): self.pmd_output.execute_cmd("quit", "#") def tear_down_all(self): - self.dut.kill_all() - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.kill_all() + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) diff --git a/tests/TestSuite_ice_dcf_flow_priority.py b/tests/TestSuite_ice_dcf_flow_priority.py index 143cf1eb..194cc93d 100644 --- a/tests/TestSuite_ice_dcf_flow_priority.py +++ b/tests/TestSuite_ice_dcf_flow_priority.py @@ -9,8 +9,8 @@ import re import time import tests.rte_flow_common as rfc -from framework.packet import Packet from 
framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from framework.utils import BLUE, GREEN, RED @@ -1836,7 +1836,7 @@ class ICEDCFFlowPriorityTest(TestCase): # modprobe vfio driver if driver == "vfio-pci": for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "vfio-pci": netdev.bind_driver(driver="vfio-pci") @@ -1844,13 +1844,13 @@ class ICEDCFFlowPriorityTest(TestCase): elif driver == "igb_uio": # igb_uio should insmod as default, no need to check for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "igb_uio": netdev.bind_driver(driver="igb_uio") else: for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver_now = netdev.get_nic_driver() if driver == "": driver = netdev.default_driver @@ -1862,40 +1862,40 @@ class ICEDCFFlowPriorityTest(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.__tx_iface = self.tester.get_interface(localPort) - self.pkt = Packet() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.__tx_iface = self.tg_node.get_interface(localPort) + self.scapy_pkt_builder = ScapyPacketBuilder() self.testpmd_status = "close" # bind pf to kernel - self.bind_nics_driver(self.dut_ports, driver="ice") + self.bind_nics_driver(self.sut_ports, driver="ice") # get PF interface name - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] # get priv-flags default stats self.flag = "vf-vlan-pruning" - self.default_stats = self.dut.get_priv_flags_state(self.pf0_intf, self.flag) + self.default_stats = self.sut_node.get_priv_flags_state(self.pf0_intf, self.flag) # set vf driver self.vf_driver = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") - self.path = self.dut.apps_name["test-pmd"] + self.sut_node.send_expect("modprobe vfio-pci", "#") + self.path = self.sut_node.apps_name["test-pmd"] def setup_1pf_vfs_env(self, pf_port=0, driver="default"): - self.used_dut_port_0 = self.dut_ports[pf_port] + self.used_sut_port_0 = self.sut_ports[pf_port] # get PF interface name - self.pf0_intf = self.dut.ports_info[self.used_dut_port_0]["intf"] + self.pf0_intf = self.sut_node.ports_info[self.used_sut_port_0]["intf"] if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s off" % (self.pf0_intf, self.flag), "# " ) # generate 4 VFs on PF - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 4, driver=driver) - self.sriov_vfs_port_0 = 
self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 4, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] # set VF0 as trust - self.dut.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") + self.sut_node.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") # bind VFs to dpdk driver for port in self.sriov_vfs_port_0: port.bind_driver(self.vf_driver) @@ -1906,8 +1906,8 @@ class ICEDCFFlowPriorityTest(TestCase): Run before each test case. """ # Switch's recpri resource cannot be released,so need to reload ice driver to release it, this is a known issue of ND - self.dut.send_expect("rmmod ice", "#", 30) - self.dut.send_expect("modprobe ice", "#", 30) + self.sut_node.send_expect("rmmod ice", "#", 30) + self.sut_node.send_expect("modprobe ice", "#", 30) def create_testpmd_command(self): """ @@ -1918,7 +1918,7 @@ class ICEDCFFlowPriorityTest(TestCase): vf1_pci = self.sriov_vfs_port_0[1].pci vf2_pci = self.sriov_vfs_port_0[2].pci vf3_pci = self.sriov_vfs_port_0[3].pci - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci, vf1_pci, vf2_pci, vf3_pci], port_options={vf0_pci: "cap=dcf"}, @@ -1931,25 +1931,25 @@ class ICEDCFFlowPriorityTest(TestCase): launch testpmd with the command """ command = self.create_testpmd_command() - out = self.dut.send_expect(command, "testpmd> ", 15) + out = self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = "running" - # self.dut.send_expect("set portlist 1", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + # self.sut_node.send_expect("set portlist 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) def 
send_and_check_packets(self, dic, session_name="", tx_iface=""): """ general packets processing workflow. """ if session_name == "": - session_name = self.dut + session_name = self.sut_node if tx_iface == "": tx_iface = self.__tx_iface session_name.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - self.pkt.update_pkt(dic["scapy_str"]) - self.pkt.send_pkt(self.tester, tx_port=tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(dic["scapy_str"]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tx_iface, count=1, timeout=370) time.sleep(3) out = session_name.send_expect("stop", "testpmd> ", 15) dic["check_func"]["func"]( @@ -1963,7 +1963,7 @@ class ICEDCFFlowPriorityTest(TestCase): validate switch filter rules """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = "Flow rule validated" rule_list = [] if isinstance(rte_flow_pattern, list): @@ -2005,7 +2005,7 @@ class ICEDCFFlowPriorityTest(TestCase): create switch filter rules """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) created") rule_list = [] if isinstance(rte_flow_pattern, list): @@ -2045,7 +2045,7 @@ class ICEDCFFlowPriorityTest(TestCase): check the rules in list identical to ones in rule_list """ if session_name == "": - session_name = self.dut + session_name = self.sut_node out = session_name.send_expect("flow list %d" % port_id, "testpmd> ", 15) p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule") m = p.search(out) @@ -2069,7 +2069,7 @@ class ICEDCFFlowPriorityTest(TestCase): self, port_id, rule_list, session_name="", need_verify=True ): if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) destroyed") destroy_list = [] if isinstance(rule_list, list): @@ -2497,9 +2497,9 @@ class ICEDCFFlowPriorityTest(TestCase): "expect_results": {"expect_pkts": 1}, } self.send_and_check_packets(matched_dic) - 
self.dut.send_expect("flow flush 0", "testpmd> ", 15) - self.dut.send_expect("clear port stats all", "testpmd> ", 15) - self.dut.send_expect("quit", "#", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 15) + self.sut_node.send_expect("quit", "#", 15) # subcase 2: same pattern/input set/priority different action self.launch_testpmd() @@ -2519,9 +2519,9 @@ class ICEDCFFlowPriorityTest(TestCase): "expect_results": {"expect_pkts": [1, 1]}, } self.send_and_check_packets(matched_dic) - self.dut.send_expect("flow flush 0", "testpmd> ", 15) - self.dut.send_expect("clear port stats all", "testpmd> ", 15) - self.dut.send_expect("quit", "#", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 15) + self.sut_node.send_expect("quit", "#", 15) # subcase 3: some rules overlap self.launch_testpmd() @@ -2546,17 +2546,17 @@ class ICEDCFFlowPriorityTest(TestCase): "expect_results": {"expect_pkts": [1, 1]}, } self.send_and_check_packets(matched_dic) - self.dut.send_expect("flow destroy 0 rule 5", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 5", "testpmd> ", 15) self.send_and_check_packets(matched_dic) - self.dut.send_expect("flow destroy 0 rule 4", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 4", "testpmd> ", 15) matched_dic["check_func"]["param"]["expect_port"] = 2 matched_dic["expect_results"]["expect_pkts"] = 1 self.send_and_check_packets(matched_dic) - self.dut.send_expect("flow destroy 0 rule 3", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 3", "testpmd> ", 15) matched_dic["check_func"]["param"]["expect_port"] = 1 matched_dic["expect_results"]["expect_pkts"] = 1 self.send_and_check_packets(matched_dic) - self.dut.send_expect("flow destroy 0 rule 1", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 1", "testpmd> ", 15) 
matched_dic["expect_results"]["expect_pkts"] = 0 self.send_and_check_packets(matched_dic) matched_dic = { @@ -2570,10 +2570,10 @@ class ICEDCFFlowPriorityTest(TestCase): "expect_results": {"expect_pkts": 1}, } self.send_and_check_packets(matched_dic) - self.dut.send_expect("flow destroy 0 rule 2", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 2", "testpmd> ", 15) matched_dic["check_func"]["param"]["expect_port"] = 1 self.send_and_check_packets(matched_dic) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ", 15) matched_dic["expect_results"]["expect_pkts"] = 0 self.send_and_check_packets(matched_dic) @@ -2583,25 +2583,25 @@ class ICEDCFFlowPriorityTest(TestCase): """ if self.testpmd_status != "close": # destroy all flow rules on DCF - self.dut.send_expect("flow flush 0", "testpmd> ", 15) - self.dut.send_expect("clear port stats all", "testpmd> ", 15) - self.dut.send_expect("quit", "#", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 15) + self.sut_node.send_expect("quit", "#", 15) # kill all DPDK application - self.dut.kill_all() + self.sut_node.kill_all() # destroy vfs - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) self.testpmd_status = "close" if getattr(self, "session_secondary", None): - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.pf0_intf, self.flag, self.default_stats), "# ", diff --git a/tests/TestSuite_ice_dcf_qos.py b/tests/TestSuite_ice_dcf_qos.py index f1890b4d..b22243a5 100644 --- a/tests/TestSuite_ice_dcf_qos.py +++ b/tests/TestSuite_ice_dcf_qos.py @@ -17,25 +17,24 @@ from copy import deepcopy from pprint import pformat from framework.exception import VerifyFailure -from framework.packet import Packet -from framework.pktgen import TRANSMIT_CONT from framework.pmd_output import PmdOutput -from framework.settings import HEADER_SIZE, NICS, get_nic_name +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.settings import HEADER_SIZE, NICS, TRANSMIT_CONT, get_nic_name from framework.test_case import TestCase class TestICEDcfQos(TestCase): def d_con(self, cmd): _cmd = [cmd, "# ", 15] if isinstance(cmd, str) else cmd - return self.dut.send_expect(*_cmd) + return self.sut_node.send_expect(*_cmd) def d_a_con(self, cmds): prompt = "# " if isinstance(cmds, str): _cmd = [cmds, prompt, 20] - return self.dut.alt_session.send_expect(*_cmd) + return self.sut_node.alt_session.send_expect(*_cmd) else: - return [self.dut.alt_session.send_expect(_cmd, prompt, 20) for _cmd in cmds] + return [self.sut_node.alt_session.send_expect(_cmd, prompt, 20) for _cmd in cmds] def pmd_con(self, cmds): prompt = "testpmd> " @@ -76,18 +75,18 @@ class TestICEDcfQos(TestCase): values = pkt_config pkt_type = values.get("type") pkt_layers = values.get("pkt_layers") - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - return pkt.pktgen.pkt + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + return scapy_pkt_builder.scapy_pkt_util.pkt def add_stream_to_pktgen(self, txport, rxport, send_pkts, 
option): stream_ids = [] for pkt in send_pkts: _option = deepcopy(option) _option["pcap"] = pkt - stream_id = self.tester.pktgen.add_stream(txport, rxport, send_pkts[0]) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(txport, rxport, send_pkts[0]) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) return stream_ids @@ -97,7 +96,7 @@ class TestICEDcfQos(TestCase): rate_percent = option.get("rate_percent", float(100)) duration = option.get("duration", 20) send_pkts = option.get("stream") or [] - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() s_option = { "stream_config": { "txmode": {}, @@ -122,12 +121,12 @@ class TestICEDcfQos(TestCase): "interval": duration - 5, "callback": self.testpmd_query_stats, } - result = self.tester.pktgen.measure(stream_ids, traffic_opt) + result = self.tg_node.perf_tg.measure(stream_ids, traffic_opt) return result def check_traffic(self, stream_configs, traffic_tasks, frame_size=68): - tester_rx_port_id = tester_tx_port_id = self.tester.get_local_port( - self.dut_ports[0] + tg_rx_port_id = tg_tx_port_id = self.tg_node.get_local_port( + self.sut_ports[0] ) duration = 20 results = [] @@ -138,8 +137,8 @@ class TestICEDcfQos(TestCase): for idx in stream_ids: streams.append(self.config_stream(stream_configs[idx], frame_size)) ports_topo = { - "tx_intf": tester_tx_port_id, - "rx_intf": tester_rx_port_id, + "tx_intf": tg_tx_port_id, + "rx_intf": tg_rx_port_id, "stream": streams, "duration": duration, "rate_percent": rate_percent, @@ -190,16 +189,16 @@ class TestICEDcfQos(TestCase): def get_custom_nic_port(self, nic_name, num=None): cnt = 0 - for dut_port_id in self.dut.get_ports(): - port_type = self.dut.ports_info[dut_port_id]["type"] - intf = self.dut.ports_info[dut_port_id]["intf"] - pci = self.dut.ports_info[dut_port_id]["pci"] + for sut_port_id in self.sut_node.get_ports(): + port_type = 
self.sut_node.ports_info[sut_port_id]["type"] + intf = self.sut_node.ports_info[sut_port_id]["intf"] + pci = self.sut_node.ports_info[sut_port_id]["pci"] _nic_name = get_nic_name(port_type) if _nic_name in nic_name: if num and cnt != num: cnt += 1 continue - return dut_port_id, intf, pci + return sut_port_id, intf, pci return None, None, None def pf_preset(self, num=None): @@ -216,7 +215,7 @@ class TestICEDcfQos(TestCase): ) msg = "not enough nics for testing" self.verify(self.nic_100g is not None and self.nic_25g is not None, msg) - port_obj = self.dut.ports_info[self.nic_100g]["port"] + port_obj = self.sut_node.ports_info[self.nic_100g]["port"] port_obj.bind_driver(port_obj.default_driver) cmds = [ f"{self.dcbgetset} {self.nic100G_intf} --ieee --up2tc 0,0,0,1,2,0,0,0 --tcbw 10,30,60,0,0,0,0,0,0 --tsa 0,0,0,0,0,0,0,0 --pfc 0,0,0,0,0,0,0,0", @@ -225,13 +224,13 @@ class TestICEDcfQos(TestCase): f"lldptool -T -i {self.nic100G_intf} -V ETS-CFG willing=no", ] self.d_a_con(cmds) - port_obj = self.dut.ports_info[self.nic_25g]["port"] + port_obj = self.sut_node.ports_info[self.nic_25g]["port"] port_obj.bind_driver(port_obj.default_driver) def pf_restore(self): - port_obj = self.dut.ports_info[self.nic_100g]["port"] + port_obj = self.sut_node.ports_info[self.nic_100g]["port"] port_obj.bind_driver(self.drivername) - port_obj = self.dut.ports_info[self.nic_25g]["port"] + port_obj = self.sut_node.ports_info[self.nic_25g]["port"] port_obj.bind_driver(self.drivername) def vf_init(self): @@ -256,13 +255,13 @@ class TestICEDcfQos(TestCase): ] self.d_a_con(cmds) for index, port_id in enumerate(valports): - port_obj = self.dut.ports_info[port_id]["port"] + port_obj = self.sut_node.ports_info[port_id]["port"] pf_driver = port_obj.default_driver - self.dut.generate_sriov_vfs_by_port(port_id, vf_num, driver=pf_driver) + self.sut_node.generate_sriov_vfs_by_port(port_id, vf_num, driver=pf_driver) pf_pci = port_obj.pci - sriov_vfs_port = self.dut.ports_info[port_id].get("vfs_port") + 
sriov_vfs_port = self.sut_node.ports_info[port_id].get("vfs_port") if not sriov_vfs_port: - msg = f"failed to create vf on dut port {pf_pci}" + msg = f"failed to create vf on SUT port {pf_pci}" self.logger.error(msg) continue for port in sriov_vfs_port: @@ -293,11 +292,11 @@ class TestICEDcfQos(TestCase): if not self.vf_ports_info: return for port_id, _ in self.vf_ports_info.items(): - port_obj = self.dut.ports_info[port_id]["port"] + port_obj = self.sut_node.ports_info[port_id]["port"] pf_intf = port_obj.get_interface_name() cmd = f"ip link set dev {pf_intf} vf 0 trust off" self.d_a_con(cmd) - self.dut.destroy_sriov_vfs_by_port(port_id) + self.sut_node.destroy_sriov_vfs_by_port(port_id) pf_driver = port_obj.default_driver port_obj.bind_driver(pf_driver) self.vf_ports_info = {} @@ -417,7 +416,7 @@ class TestICEDcfQos(TestCase): self.verify(False, msg) def testpmd_init(self): - self.pmd_output = PmdOutput(self.dut) + self.pmd_output = PmdOutput(self.sut_node) self.is_pmd_on = False def testpmd_start(self, vfs_group): @@ -2674,7 +2673,7 @@ class TestICEDcfQos(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) self.suite_init() def tear_down_all(self): @@ -2691,7 +2690,7 @@ class TestICEDcfQos(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.pf_restore() def test_perf_strict_mode_check_peak_tb_rate(self): diff --git a/tests/TestSuite_ice_dcf_switch_filter.py b/tests/TestSuite_ice_dcf_switch_filter.py index 96c7c30c..29ae04c8 100644 --- a/tests/TestSuite_ice_dcf_switch_filter.py +++ b/tests/TestSuite_ice_dcf_switch_filter.py @@ -9,8 +9,8 @@ import re import time import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from framework.utils import BLUE, GREEN, RED @@ -1652,7 +1652,7 @@ class ICEDCFSwitchFilterTest(TestCase): # modprobe vfio driver if driver == "vfio-pci": for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "vfio-pci": netdev.bind_driver(driver="vfio-pci") @@ -1660,13 +1660,13 @@ class ICEDCFSwitchFilterTest(TestCase): elif driver == "igb_uio": # igb_uio should insmod as default, no need to check for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "igb_uio": netdev.bind_driver(driver="igb_uio") else: for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver_now = netdev.get_nic_driver() if driver == "": driver = netdev.default_driver @@ -1678,49 +1678,49 @@ class ICEDCFSwitchFilterTest(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.used_dut_port_0 = self.dut_ports[0] - self.pf0_intf = self.dut.ports_info[self.used_dut_port_0]["intf"] - self.__tx_iface = self.tester.get_interface(localPort) - self.pkt = Packet() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.used_sut_port_0 = self.sut_ports[0] + self.pf0_intf = self.sut_node.ports_info[self.used_sut_port_0]["intf"] + self.__tx_iface = self.tg_node.get_interface(localPort) + self.scapy_pkt_builder = ScapyPacketBuilder() self.testpmd_status = "close" # bind pf to kernel - self.bind_nics_driver(self.dut_ports, driver="ice") + self.bind_nics_driver(self.sut_ports, driver="ice") # get priv-flags default stats self.flag = "vf-vlan-pruning" - self.default_stats = self.dut.get_priv_flags_state(self.pf0_intf, self.flag) + self.default_stats = self.sut_node.get_priv_flags_state(self.pf0_intf, self.flag) # set vf driver self.vf_driver = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") - self.path = self.dut.apps_name["test-pmd"] + self.sut_node.send_expect("modprobe vfio-pci", "#") + self.path = self.sut_node.apps_name["test-pmd"] def setup_1pf_vfs_env(self, pf_port=0, driver="default"): - self.used_dut_port_0 = self.dut_ports[pf_port] + self.used_sut_port_0 = self.sut_ports[pf_port] # get PF interface name - self.pf0_intf = self.dut.ports_info[self.used_dut_port_0]["intf"] - out = self.dut.send_expect("ethtool -i %s" % self.pf0_intf, "#") + self.pf0_intf = self.sut_node.ports_info[self.used_sut_port_0]["intf"] + out = self.sut_node.send_expect("ethtool -i %s" % self.pf0_intf, "#") if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s off" 
% (self.pf0_intf, self.flag), "# " ) # generate 4 VFs on PF - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 4, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 4, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] # set VF0 as trust - self.dut.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") + self.sut_node.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") # bind VFs to dpdk driver for port in self.sriov_vfs_port_0: port.bind_driver(self.vf_driver) time.sleep(5) def reload_ice(self): - self.dut.send_expect("rmmod ice", "# ", 15) - self.dut.send_expect("modprobe ice", "# ", 15) + self.sut_node.send_expect("rmmod ice", "# ", 15) + self.sut_node.send_expect("modprobe ice", "# ", 15) def set_up(self): """ @@ -1735,7 +1735,7 @@ class ICEDCFSwitchFilterTest(TestCase): # Prepare testpmd EAL and parameters vf0_pci = self.sriov_vfs_port_0[0].pci vf1_pci = self.sriov_vfs_port_0[1].pci - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci, vf1_pci], port_options={vf0_pci: "cap=dcf"}, @@ -1748,25 +1748,25 @@ class ICEDCFSwitchFilterTest(TestCase): launch testpmd with the command """ command = self.create_testpmd_command() - out = self.dut.send_expect(command, "testpmd> ", 15) + out = self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = "running" - self.dut.send_expect("set portlist 1", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) def send_packets(self, dic, session_name="", tx_iface=""): """ send packets. 
""" if session_name == "": - session_name = self.dut + session_name = self.sut_node if tx_iface == "": tx_iface = self.__tx_iface session_name.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - self.pkt.update_pkt(dic["scapy_str"]) - self.pkt.send_pkt(self.tester, tx_port=tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(dic["scapy_str"]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tx_iface, count=1, timeout=370) time.sleep(3) out = session_name.send_expect("stop", "testpmd> ") return out @@ -1776,14 +1776,14 @@ class ICEDCFSwitchFilterTest(TestCase): general packets processing workflow. """ if session_name == "": - session_name = self.dut + session_name = self.sut_node if tx_iface == "": tx_iface = self.__tx_iface session_name.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - self.pkt.update_pkt(dic["scapy_str"]) - self.pkt.send_pkt(self.tester, tx_port=tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(dic["scapy_str"]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tx_iface, count=1, timeout=370) time.sleep(3) out = session_name.send_expect("stop", "testpmd> ", 15) dic["check_func"]["func"]( @@ -1795,15 +1795,15 @@ class ICEDCFSwitchFilterTest(TestCase): general packets processing workflow. 
""" if session_name == "": - session_name = self.dut + session_name = self.sut_node if tx_iface == "": tx_iface = self.__tx_iface session_name.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - pkt = Packet() - pkt.update_pkt(dic["scapy_str"]) - pkt.send_pkt_bg(self.tester, tx_port=tx_iface, count=1, loop=0, timeout=370) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.update_pkt(dic["scapy_str"]) + scapy_pkt_builder.send_pkt_bg(self.tg_node, tx_port=tx_iface, count=1, loop=0, timeout=370) time.sleep(3) out = session_name.send_expect("stop", "testpmd> ", 15) results = dic["check_func"]["func"]( @@ -1818,7 +1818,7 @@ class ICEDCFSwitchFilterTest(TestCase): validate switch filter rules """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = "Flow rule validated" rule_list = [] if isinstance(rte_flow_pattern, list): @@ -1860,7 +1860,7 @@ class ICEDCFSwitchFilterTest(TestCase): create switch filter rules """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) created") rule_list = [] if isinstance(rte_flow_pattern, list): @@ -1900,7 +1900,7 @@ class ICEDCFSwitchFilterTest(TestCase): check the rules in list identical to ones in rule_list """ if session_name == "": - session_name = self.dut + session_name = self.sut_node out = session_name.send_expect("flow list %d" % port_id, "testpmd> ", 15) p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule") m = p.search(out) @@ -1924,7 +1924,7 @@ class ICEDCFSwitchFilterTest(TestCase): self, port_id, rule_list, session_name="", need_verify=True ): if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) destroyed") destroy_list = [] if isinstance(rule_list, list): @@ -2208,10 +2208,10 @@ class ICEDCFSwitchFilterTest(TestCase): # check no rule in the list self.check_switch_filter_rule_list(0, []) # destroy rule 0 - out = self.dut.send_expect("flow destroy 
0 rule 0", "testpmd> ", timeout=15) + out = self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ", timeout=15) self.verify("Fail" not in out, "Destroy failed.") # flush rules - out = self.dut.send_expect("flow flush 0", "testpmd> ", timeout=15) + out = self.sut_node.send_expect("flow flush 0", "testpmd> ", timeout=15) self.verify("Fail" not in out, "Destroy failed.") # add long switch rule @@ -2245,12 +2245,12 @@ class ICEDCFSwitchFilterTest(TestCase): self.create_switch_filter_rule(rules, check_stats=False) self.check_switch_filter_rule_list(0, []) - self.dut.send_expect("flow flush 0", "testpmd> ", 300) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("flow flush 0", "testpmd> ", 300) + self.sut_node.send_expect("quit", "#") self.testpmd_status = "close" # destroy vfs - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) def test_add_existing_rules_but_with_different_vfs(self): # set up 4 vfs on 1 pf environment @@ -2259,17 +2259,17 @@ class ICEDCFSwitchFilterTest(TestCase): vf0_pci = self.sriov_vfs_port_0[0].pci vf1_pci = self.sriov_vfs_port_0[1].pci vf2_pci = self.sriov_vfs_port_0[2].pci - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci, vf1_pci, vf2_pci], port_options={vf0_pci: "cap=dcf"}, ) command = self.path + all_eal_param + " -- -i" - out = self.dut.send_expect(command, "testpmd> ", 15) + out = self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = "running" - self.dut.send_expect("set portlist 1,2", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1,2", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) # 
create rules with same pattern but to different vfs rule_list = self.create_switch_filter_rule( tv_add_existing_rules_but_with_different_vfs["rte_flow_pattern"] @@ -2316,17 +2316,17 @@ class ICEDCFSwitchFilterTest(TestCase): vf0_pci = self.sriov_vfs_port_0[0].pci vf1_pci = self.sriov_vfs_port_0[1].pci vf2_pci = self.sriov_vfs_port_0[2].pci - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci, vf1_pci, vf2_pci], port_options={vf0_pci: "cap=dcf"}, ) command = self.path + all_eal_param + " -- -i" - out = self.dut.send_expect(command, "testpmd> ", 15) + out = self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = "running" - self.dut.send_expect("set portlist 1,2", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1,2", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) # create rules with one rule's input set included in the other rule_list = self.create_switch_filter_rule( tv_add_two_rules_with_one_rule_input_set_included_in_the_other[ @@ -2370,11 +2370,11 @@ class ICEDCFSwitchFilterTest(TestCase): self.setup_1pf_vfs_env() # launch testpmd command = self.create_testpmd_command() - out = self.dut.send_expect(command, "testpmd> ", 15) + out = self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = "running" - self.dut.send_expect("set portlist 1", "testpmd> ", 15) - self.dut.send_expect("set fwd mac", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd mac", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) # create a rule rule_list = self.create_switch_filter_rule( 
tv_test_fwd_with_single_vf["rte_flow_pattern"] @@ -2400,17 +2400,17 @@ class ICEDCFSwitchFilterTest(TestCase): vf0_pci = self.sriov_vfs_port_0[0].pci vf1_pci = self.sriov_vfs_port_0[1].pci vf2_pci = self.sriov_vfs_port_0[2].pci - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci, vf1_pci, vf2_pci], port_options={vf0_pci: "cap=dcf"}, ) command = self.path + all_eal_param + " -- -i" - out = self.dut.send_expect(command, "testpmd> ", 15) + out = self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = "running" - self.dut.send_expect("set portlist 1,2", "testpmd> ", 15) - self.dut.send_expect("set fwd mac", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1,2", "testpmd> ", 15) + self.sut_node.send_expect("set fwd mac", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) # create a rule rule_list = self.create_switch_filter_rule( tv_test_fwd_with_multi_vfs["rte_flow_pattern"] @@ -2437,16 +2437,16 @@ class ICEDCFSwitchFilterTest(TestCase): def test_max_vfs(self): # get max vfs number - max_vf_number = int(256 / (len(self.dut_ports))) + max_vf_number = int(256 / (len(self.sut_ports))) # set up max_vf_number vfs on 1 pf environment - self.used_dut_port_0 = self.dut_ports[0] - self.pf0_intf = self.dut.ports_info[self.used_dut_port_0]["intf"] - out = self.dut.send_expect("ethtool -i %s" % self.pf0_intf, "#") + self.used_sut_port_0 = self.sut_ports[0] + self.pf0_intf = self.sut_node.ports_info[self.used_sut_port_0]["intf"] + out = self.sut_node.send_expect("ethtool -i %s" % self.pf0_intf, "#") # generate max_vf_number VFs on PF0 - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port_0, max_vf_number, driver="default" + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port_0, max_vf_number, driver="default" ) - self.sriov_vfs_port = 
self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver("iavf") @@ -2459,20 +2459,20 @@ class ICEDCFSwitchFilterTest(TestCase): vf_intf = [value for key, value in vfs_sort] # start the max_vf_number VFs in the kernel for intf in vf_intf: - self.dut.send_expect("ifconfig %s up" % intf, "#") - self.dut.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") - self.dut.send_expect( + self.sut_node.send_expect("ifconfig %s up" % intf, "#") + self.sut_node.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.vf_driver, vf_pci[0]), "# " ) time.sleep(5) vf_intf.pop(0) # launch testpmd vf0_pci = vf_pci[0] - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci], port_options={vf0_pci: "cap=dcf"} ) command = self.path + all_eal_param + " -- -i" - out = self.dut.send_expect(command, "testpmd> ", 15) + out = self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = "running" # generate max_vf_number-1 rules to each vf and matched packets for i in range(1, max_vf_number): @@ -2486,7 +2486,7 @@ class ICEDCFSwitchFilterTest(TestCase): % i ) tv_max_vfs["matched"]["scapy_str"].append(matched_scapy_str) - out = self.dut.send_expect("show port info all", "testpmd> ", 15) + out = self.sut_node.send_expect("show port info all", "testpmd> ", 15) # create max_vf_number-1 rules rule_list = self.create_switch_filter_rule(tv_max_vfs["rte_flow_pattern"]) self.check_switch_filter_rule_list(0, rule_list) @@ -2500,7 +2500,7 @@ class ICEDCFSwitchFilterTest(TestCase): matched_dic = tv_max_vfs["matched"] out = self.send_packets(matched_dic) # check the max_vf_number-1 packets received by each vf - self.session_secondary = self.dut.new_session(suite="session_secondary") + 
self.session_secondary = self.sut_node.new_session(suite="session_secondary") # get the log of each kernel vf out_vfs = self.get_kernel_vf_log(vf_intf, self.session_secondary) matched_dic["check_func"]["func"]( @@ -2576,8 +2576,8 @@ class ICEDCFSwitchFilterTest(TestCase): matched_dic = tv_mac_ipv4_udp_pay["matched"] self.send_and_check_packets(matched_dic) # stop the DCF, then start the DCF - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ") # send matched packets, port 1 can not receive the packets. destroy_dict = copy.deepcopy(matched_dic) destroy_dict["expect_results"] = {"expect_pkts": 0} @@ -2618,17 +2618,17 @@ class ICEDCFSwitchFilterTest(TestCase): vf0_pci = self.sriov_vfs_port_0[0].pci vf1_pci = self.sriov_vfs_port_0[1].pci vf2_pci = self.sriov_vfs_port_0[2].pci - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci, vf1_pci, vf2_pci], port_options={vf0_pci: "cap=dcf"}, ) command = self.path + all_eal_param + " -- -i" - out = self.dut.send_expect(command, "testpmd> ", 15) + out = self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = "running" - self.dut.send_expect("set portlist 1,2", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1,2", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) self._rte_flow_validate_pattern( tv_add_two_rules_with_different_input_set_different_vf_id, False ) @@ -2636,7 +2636,7 @@ class ICEDCFSwitchFilterTest(TestCase): @skip_unsupported_pkg(["os default", "wireless"]) def test_mac_drop_action(self): self.setup_1pf_vfs_env() - self.dut.send_expect( + 
self.sut_node.send_expect( 'ip link set %s vf 1 mac "00:11:22:33:44:55"' % self.pf0_intf, "# " ) self.launch_testpmd() @@ -2663,8 +2663,8 @@ class ICEDCFSwitchFilterTest(TestCase): pattern["matched"]["scapy_str"] ) self.send_and_check_packets(destroy_dict) - self.dut.send_expect("flow flush 0", "testpmd> ", 15) - self.dut.send_expect("clear port stats all", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 15) def tear_down(self): """ @@ -2672,25 +2672,25 @@ class ICEDCFSwitchFilterTest(TestCase): """ if self.testpmd_status != "close": # destroy all flow rules on DCF - self.dut.send_expect("flow flush 0", "testpmd> ", 15) - self.dut.send_expect("clear port stats all", "testpmd> ", 15) - self.dut.send_expect("quit", "#", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 15) + self.sut_node.send_expect("quit", "#", 15) # kill all DPDK application - self.dut.kill_all() + self.sut_node.kill_all() # destroy vfs - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) self.testpmd_status = "close" if getattr(self, "session_secondary", None): - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.pf0_intf, self.flag, self.default_stats), "# ", diff --git a/tests/TestSuite_ice_dcf_switch_filter_gtpu.py b/tests/TestSuite_ice_dcf_switch_filter_gtpu.py index 0f48d78d..31354512 100644 --- a/tests/TestSuite_ice_dcf_switch_filter_gtpu.py +++ b/tests/TestSuite_ice_dcf_switch_filter_gtpu.py @@ -9,8 +9,8 @@ import re import time import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from framework.utils import BLUE, GREEN, RED @@ -3544,22 +3544,22 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.__tx_iface = self.tester.get_interface(localPort) - self.pkt = Packet() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.__tx_iface = self.tg_node.get_interface(localPort) + self.scapy_pkt_builder = ScapyPacketBuilder() self.testpmd_status = "close" self.pass_flag = "passed" self.fail_flag = "failed" # bind pf to kernel - self.dut.bind_interfaces_linux("ice") + self.sut_node.bind_interfaces_linux("ice") # set vf driver self.vf_driver = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") - self.path = self.dut.apps_name["test-pmd"] + self.sut_node.send_expect("modprobe vfio-pci", "#") + self.path = self.sut_node.apps_name["test-pmd"] def setup_1pf_vfs_env(self, pf_port=0, driver="default"): """ @@ -3569,14 
+3569,14 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): :param driver: set vf driver """ self.reload_ice() - self.used_dut_port_0 = self.dut_ports[pf_port] + self.used_sut_port_0 = self.sut_ports[pf_port] # get PF interface name - self.pf0_intf = self.dut.ports_info[self.used_dut_port_0]["intf"] + self.pf0_intf = self.sut_node.ports_info[self.used_sut_port_0]["intf"] # generate 4 VFs on PF - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 2, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 2, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] # set VF0 as trust - self.dut.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") + self.sut_node.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") # bind VFs to dpdk driver for port in self.sriov_vfs_port_0: port.bind_driver(self.vf_driver) @@ -3592,8 +3592,8 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): """ dcf switch need reload driver to ensure create rule sucessful """ - self.dut.send_expect("rmmod ice", "# ", 15) - self.dut.send_expect("modprobe ice", "# ", 15) + self.sut_node.send_expect("rmmod ice", "# ", 15) + self.sut_node.send_expect("modprobe ice", "# ", 15) def create_testpmd_command(self): """ @@ -3604,7 +3604,7 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): # Prepare testpmd EAL and parameters vf0_pci = self.sriov_vfs_port_0[0].pci vf1_pci = self.sriov_vfs_port_0[1].pci - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci, vf1_pci], port_options={vf0_pci: "cap=dcf"}, @@ -3617,11 +3617,11 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): launch testpmd with the command """ command = self.create_testpmd_command() - self.dut.send_expect(command, "testpmd> ", 15) + self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = 
"running" - self.dut.send_expect("set portlist 1", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) def send_and_check_packets(self, dic, session_name="", tx_iface=""): """ @@ -3632,14 +3632,14 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): :param tx_iface: send pkts port """ if session_name == "": - session_name = self.dut + session_name = self.sut_node if tx_iface == "": tx_iface = self.__tx_iface session_name.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - self.pkt.update_pkt(dic["scapy_str"]) - self.pkt.send_pkt(self.tester, tx_port=tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(dic["scapy_str"]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tx_iface, count=1, timeout=370) time.sleep(3) out = session_name.send_expect("stop", "testpmd> ", 15) rfc.check_vf_rx_packets_number(out, dic["param"], dic["expect_results"]) @@ -3655,7 +3655,7 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): :param check_stats: check requirement validate rule true or false """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = "Flow rule validated" rule_list = [] if isinstance(rte_flow_pattern, list): @@ -3702,7 +3702,7 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): :return: return rule list for destroy rule test """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) created") rule_list = [] if isinstance(rte_flow_pattern, list): @@ -3748,7 +3748,7 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): :return: return not the same as expected rule list """ if session_name == "": - session_name = self.dut + session_name = self.sut_node out = session_name.send_expect("flow list %d" % 
port_id, "testpmd> ", 15) p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule") m = p.search(out) @@ -3781,7 +3781,7 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): :return: return not the same as expected rule list """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) destroyed") destroy_list = [] if isinstance(rule_list, list): @@ -3819,17 +3819,17 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): """ if self.testpmd_status != "close": # destroy all flow rules on DCF - self.dut.send_expect("flow flush 0", "testpmd> ", 15) - self.dut.send_expect("clear port stats all", "testpmd> ", 15) - self.dut.send_expect("quit", "#", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 15) + self.sut_node.send_expect("quit", "#", 15) # kill all DPDK application - self.dut.kill_all() + self.sut_node.kill_all() # destroy vfs - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) self.testpmd_status = "close" if getattr(self, "session_secondary", None): - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) def _rte_flow_validate_pattern(self, test_vector): """ @@ -3877,7 +3877,7 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): except Exception as e: self.logger.warning("sub_case %s failed: %s" % (tv["name"], e)) test_results[tv["name"]] = self.fail_flag - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) pass_rate = ( round( list(test_results.values()).count(self.pass_flag) / len(test_results), 4 @@ -3976,4 +3976,4 @@ class ICEDCFSwitchFilterGTPUTest(TestCase): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_dcf_switch_filter_pppoe.py b/tests/TestSuite_ice_dcf_switch_filter_pppoe.py index 7b974e01..7fca1e9a 100644 --- a/tests/TestSuite_ice_dcf_switch_filter_pppoe.py +++ b/tests/TestSuite_ice_dcf_switch_filter_pppoe.py @@ -9,8 +9,8 @@ import re import time import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from framework.utils import BLUE, GREEN, RED @@ -798,7 +798,7 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): # modprobe vfio driver if driver == "vfio-pci": for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "vfio-pci": netdev.bind_driver(driver="vfio-pci") @@ -806,13 +806,13 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): elif driver == "igb_uio": # igb_uio should insmod as default, no need to check for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "igb_uio": netdev.bind_driver(driver="igb_uio") else: for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver_now = netdev.get_nic_driver() if driver == "": driver = netdev.default_driver @@ -825,50 +825,50 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.used_dut_port_0 = self.dut_ports[0] - self.pf0_intf = self.dut.ports_info[self.used_dut_port_0]["intf"] - self.__tx_iface = self.tester.get_interface(localPort) - self.pkt = Packet() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.used_sut_port_0 = self.sut_ports[0] + self.pf0_intf = self.sut_node.ports_info[self.used_sut_port_0]["intf"] + self.__tx_iface = self.tg_node.get_interface(localPort) + self.scapy_pkt_builder = ScapyPacketBuilder() self.testpmd_status = "close" # bind pf to kernel - self.bind_nics_driver(self.dut_ports, driver="ice") + self.bind_nics_driver(self.sut_ports, driver="ice") # get priv-flags default stats self.flag = "vf-vlan-pruning" - self.default_stats = self.dut.get_priv_flags_state(self.pf0_intf, self.flag) + self.default_stats = self.sut_node.get_priv_flags_state(self.pf0_intf, self.flag) # set vf driver self.vf_driver = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") - self.path = self.dut.apps_name["test-pmd"] + self.sut_node.send_expect("modprobe vfio-pci", "#") + self.path = self.sut_node.apps_name["test-pmd"] def setup_1pf_vfs_env(self, pf_port=0, driver="default"): - self.used_dut_port_0 = self.dut_ports[pf_port] + self.used_sut_port_0 = self.sut_ports[pf_port] # get PF interface name - self.pf0_intf = self.dut.ports_info[self.used_dut_port_0]["intf"] - out = self.dut.send_expect("ethtool -i %s" % self.pf0_intf, "#") + self.pf0_intf = self.sut_node.ports_info[self.used_sut_port_0]["intf"] + out = self.sut_node.send_expect("ethtool -i %s" % self.pf0_intf, "#") if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s off" 
% (self.pf0_intf, self.flag), "# " ) # generate 4 VFs on PF - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 4, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 4, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] # set VF0 as trust - self.dut.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") + self.sut_node.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") # bind VFs to dpdk driver for port in self.sriov_vfs_port_0: port.bind_driver(self.vf_driver) time.sleep(5) def reload_ice(self): - self.dut.send_expect("rmmod ice", "# ", 15) - self.dut.send_expect("modprobe ice", "# ", 15) + self.sut_node.send_expect("rmmod ice", "# ", 15) + self.sut_node.send_expect("modprobe ice", "# ", 15) def set_up(self): """ @@ -883,7 +883,7 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): # Prepare testpmd EAL and parameters vf0_pci = self.sriov_vfs_port_0[0].pci vf1_pci = self.sriov_vfs_port_0[1].pci - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci, vf1_pci], port_options={vf0_pci: "cap=dcf"}, @@ -896,25 +896,25 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): launch testpmd with the command """ command = self.create_testpmd_command() - out = self.dut.send_expect(command, "testpmd> ", 15) + out = self.sut_node.send_expect(command, "testpmd> ", 15) self.testpmd_status = "running" - self.dut.send_expect("set portlist 1", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) def send_packets(self, dic, session_name="", tx_iface=""): """ send 
packets. """ if session_name == "": - session_name = self.dut + session_name = self.sut_node if tx_iface == "": tx_iface = self.__tx_iface session_name.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - self.pkt.update_pkt(dic["scapy_str"]) - self.pkt.send_pkt(self.tester, tx_port=tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(dic["scapy_str"]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tx_iface, count=1, timeout=370) time.sleep(3) out = session_name.send_expect("stop", "testpmd> ") return out @@ -924,14 +924,14 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): general packets processing workflow. """ if session_name == "": - session_name = self.dut + session_name = self.sut_node if tx_iface == "": tx_iface = self.__tx_iface session_name.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - self.pkt.update_pkt(dic["scapy_str"]) - self.pkt.send_pkt(self.tester, tx_port=tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(dic["scapy_str"]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tx_iface, count=1, timeout=370) time.sleep(3) out = session_name.send_expect("stop", "testpmd> ", 15) dic["check_func"]["func"]( @@ -943,15 +943,15 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): general packets processing workflow. 
""" if session_name == "": - session_name = self.dut + session_name = self.sut_node if tx_iface == "": tx_iface = self.__tx_iface session_name.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - pkt = Packet() - pkt.update_pkt(dic["scapy_str"]) - pkt.send_pkt_bg(self.tester, tx_port=tx_iface, count=1, loop=0, timeout=370) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.update_pkt(dic["scapy_str"]) + scapy_pkt_builder.send_pkt_bg(self.tg_node, tx_port=tx_iface, count=1, loop=0, timeout=370) time.sleep(3) out = session_name.send_expect("stop", "testpmd> ", 15) results = dic["check_func"]["func"]( @@ -966,7 +966,7 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): validate switch filter rules """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = "Flow rule validated" rule_list = [] if isinstance(rte_flow_pattern, list): @@ -1008,7 +1008,7 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): create switch filter rules """ if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) created") rule_list = [] if isinstance(rte_flow_pattern, list): @@ -1048,7 +1048,7 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): check the rules in list identical to ones in rule_list """ if session_name == "": - session_name = self.dut + session_name = self.sut_node out = session_name.send_expect("flow list %d" % port_id, "testpmd> ", 15) p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule") m = p.search(out) @@ -1072,7 +1072,7 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): self, port_id, rule_list, session_name="", need_verify=True ): if session_name == "": - session_name = self.dut + session_name = self.sut_node p = re.compile(r"Flow rule #(\d+) destroyed") destroy_list = [] if isinstance(rule_list, list): @@ -1263,25 +1263,25 @@ class ICEDCFSwitchFilterPPPOETest(TestCase): """ if self.testpmd_status != "close": # destroy all flow rules on DCF - self.dut.send_expect("flow flush 
0", "testpmd> ", 15) - self.dut.send_expect("clear port stats all", "testpmd> ", 15) - self.dut.send_expect("quit", "#", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 15) + self.sut_node.send_expect("quit", "#", 15) # kill all DPDK application - self.dut.kill_all() + self.sut_node.kill_all() # destroy vfs - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) self.testpmd_status = "close" if getattr(self, "session_secondary", None): - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.pf0_intf, self.flag, self.default_stats), "# ", diff --git a/tests/TestSuite_ice_ecpri.py b/tests/TestSuite_ice_ecpri.py index 92d6aa4a..465bdf92 100644 --- a/tests/TestSuite_ice_ecpri.py +++ b/tests/TestSuite_ice_ecpri.py @@ -3,8 +3,8 @@ import time import framework.utils as utils import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.utils import GREEN, RED @@ -187,19 +187,19 @@ class TestICEEcpri(TestCase): prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - - self.used_dut_port = self.dut_ports[0] - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + + self.used_sut_port = self.sut_ports[0] + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] self.file_path = "./drivers/net/iavf/iavf_rxtx.c" self.compile_dpdk() self.vf_flag = False @@ -207,12 +207,12 @@ class TestICEEcpri(TestCase): self.pass_flag = "passed" self.fail_flag = "failed" - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.right_ecpri = "0x5123" self.wrong_ecpri = "0x5121" - self.new_session = self.dut.create_session(name="self.new_session") + self.new_session = self.sut_node.create_session(name="self.new_session") def set_up(self): """ @@ -227,20 +227,20 @@ class 
TestICEEcpri(TestCase): timeout=10, ) self.launch_testpmd() - self.pkt = Packet() + self.scapy_pkt_builder = ScapyPacketBuilder() def create_iavf(self): if self.vf_flag is False: - self.dut.bind_interfaces_linux("ice") - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 4) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.bind_interfaces_linux("ice") + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 4) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] self.vf_flag = True try: for i in range(len(self.sriov_vfs_port)): if i != len(self.sriov_vfs_port): self.sriov_vfs_port[i].bind_driver(self.drivername) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf %s mac %s" % (self.pf_interface, i, Mac_list[i]), "# ", @@ -252,7 +252,7 @@ class TestICEEcpri(TestCase): def destroy_iavf(self): if self.vf_flag is True: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) self.vf_flag = False def launch_testpmd(self): @@ -283,14 +283,14 @@ class TestICEEcpri(TestCase): "Failed to init DCF parent adapter" not in out_testpmd, "request for DCF is rejected.", ) - out_portinfo = self.dut.send_expect("show port info 0", "testpmd> ", 15) + out_portinfo = self.sut_node.send_expect("show port info 0", "testpmd> ", 15) self.verify("net_ice_dcf" in out_portinfo, "request for DCF is rejected.") else: self.verify( "Failed to init DCF parent adapter" in out_testpmd, "request for DCF is accepted.", ) - out_portinfo = self.dut.send_expect("show port info 0", "testpmd> ", 15) + out_portinfo = self.sut_node.send_expect("show port info 0", "testpmd> ", 15) self.verify( "net_ice_dcf" not in out_portinfo, "request for DCF is accepted." 
) @@ -460,7 +460,7 @@ class TestICEEcpri(TestCase): ) def test_rss_for_eth_ecpri(self): - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") eal_param = " -a {} -a {}".format( self.sriov_vfs_port[0].pci, self.sriov_vfs_port[1].pci ) @@ -605,7 +605,7 @@ class TestICEEcpri(TestCase): self.new_session.send_expect( "ip link set {} vf 0 mac {}".format(self.pf_interface, new_mac), "#" ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.launch_testpmd() self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123") self.pmd_output.execute_cmd( @@ -628,7 +628,7 @@ class TestICEEcpri(TestCase): hash_lst = [i.get("RSS hash") for i in out_data] self.verify(len(set(hash_lst)) == 1, "test fail, RSS hash is not same") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.launch_testpmd() self.new_session.send_expect( "ip link set {} vf 0 trust off".format(self.pf_interface), "#" @@ -688,7 +688,7 @@ class TestICEEcpri(TestCase): ) def test_DCF_exit_for_eth_ecpri_and_udp_ecpri_rss(self): - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") eal_param = " -a {},cap=dcf".format(self.sriov_vfs_port[0].pci) self.pmd_output.start_testpmd( cores=list(range(8)), @@ -697,7 +697,7 @@ class TestICEEcpri(TestCase): socket=self.ports_socket, ) self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123") - pmd_output1 = PmdOutput(self.dut, self.new_session) + pmd_output1 = PmdOutput(self.sut_node, self.new_session) eal_param1 = " -a {} -a {}".format( self.sriov_vfs_port[1].pci, self.sriov_vfs_port[2].pci ) @@ -866,14 +866,14 @@ class TestICEEcpri(TestCase): rule_id = 0 if isinstance(rule_id, list): for i in rule_id: - out = self.dut.send_command( + out = self.sut_node.send_command( "flow destroy %s rule %s" % (port_id, i), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") m = p.search(out) self.verify(m, "flow rule %s delete failed" % 
rule_id) else: - out = self.dut.send_command( + out = self.sut_node.send_command( "flow destroy %s rule %s" % (port_id, rule_id), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") @@ -881,12 +881,12 @@ class TestICEEcpri(TestCase): self.verify(m, "flow rule %s delete failed" % rule_id) def send_packets(self, packets, tx_port=None, count=1): - self.pkt.update_pkt(packets) - tx_port = self.tester_iface0 if not tx_port else tx_port - self.pkt.send_pkt(crb=self.tester, tx_port=tx_port, count=count) + self.scapy_pkt_builder.update_pkt(packets) + tx_port = self.tg_iface0 if not tx_port else tx_port + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_port, count=count) def send_pkts_getouput(self, pkts, port_id=0, count=1, drop=False): - tx_port = self.tester_iface0 if port_id == 0 else self.tester_iface1 + tx_port = self.tg_iface0 if port_id == 0 else self.tg_iface1 time.sleep(1) if drop: @@ -910,7 +910,7 @@ class TestICEEcpri(TestCase): if tv["send_port"].get("port_id") is not None else 0 ) - dut_port_id = ( + sut_port_id = ( tv["check_param"]["port_id"] if tv["check_param"].get("port_id") is not None else 0 @@ -947,7 +947,7 @@ class TestICEEcpri(TestCase): self.check_fdir_rule( port_id=tv["check_param"]["port_id"], rule_list=["0"] + rule_li ) - self.destroy_fdir_rule(rule_id=rule_li, port_id=dut_port_id) + self.destroy_fdir_rule(rule_id=rule_li, port_id=sut_port_id) # send matched packet out3 = self.send_pkts_getouput( pkts=tv["scapy_str"]["match"], @@ -968,14 +968,14 @@ class TestICEEcpri(TestCase): ) # check not rule exists self.check_fdir_rule( - port_id=dut_port_id, rule_list=rule_li, stats=False + port_id=sut_port_id, rule_list=rule_li, stats=False ) test_results[tv["name"]] = True self.logger.info((GREEN("case passed: %s" % tv["name"]))) except Exception as e: self.logger.warning((RED(e))) - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("flow flush 1", timeout=1) + self.sut_node.send_command("flow flush 0", 
timeout=1) + self.sut_node.send_command("flow flush 1", timeout=1) test_results[tv["name"]] = False self.logger.info((GREEN("case failed: %s" % tv["name"]))) continue @@ -1118,7 +1118,7 @@ class TestICEEcpri(TestCase): [data.get("FDIR matched ID") for data in data_lst] == [None, "0x2"], "pkt with wrong FDIR matched ID!", ) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") self.launch_testpmd() self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123") self.pmd_output.execute_cmd( @@ -1149,7 +1149,7 @@ class TestICEEcpri(TestCase): ) def test_ecpri_fdir_when_DCF_exit(self): - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") eal_param = " -a {},cap=dcf".format(self.sriov_vfs_port[0].pci) self.pmd_output.start_testpmd( cores=list(range(8)), @@ -1158,7 +1158,7 @@ class TestICEEcpri(TestCase): socket=self.ports_socket, ) self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123") - pmd_output1 = PmdOutput(self.dut, self.new_session) + pmd_output1 = PmdOutput(self.sut_node, self.new_session) eal_param1 = " -a {} -a {}".format( self.sriov_vfs_port[1].pci, self.sriov_vfs_port[2].pci ) @@ -1198,7 +1198,7 @@ class TestICEEcpri(TestCase): "mark id or queue wrong!", ) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") data_lst = self.get_receive_lst(tag_lst, pkt_lst, pmd_output=pmd_output1) # verify self.verify( @@ -1249,8 +1249,8 @@ class TestICEEcpri(TestCase): r"sed -i '/rx_pkt_burst = iavf_recv_pkts;/{n;s/\}/\}dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;\n/g}' ", ] for cmd in cmd_lst: - self.dut.send_expect(cmd + self.file_path, "#") - self.dut.build_install_dpdk(self.target) + self.sut_node.send_expect(cmd + self.file_path, "#") + self.sut_node.build_install_dpdk(self.target) def send_and_verify(self, dts_mac, ecpri, if_match=True): ptype_lst = ptype_match_lst if if_match else ptype_nomatch_lst @@ -1263,15 +1263,15 @@ class TestICEEcpri(TestCase): def 
send_pkt( self, pkt_str="", dts_mac="00:11:22:33:44:11", ecpri="0x5123", pmd_output="" ): - self.pkt.append_pkt(pkt_str.format(dts_mac, ecpri)) - self.pkt.send_pkt(crb=self.tester, tx_port=self.tester_iface0, count=1) + self.scapy_pkt_builder.append_pkt(pkt_str.format(dts_mac, ecpri)) + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=self.tg_iface0, count=1) out = pmd_output.get_output() if pmd_output else self.pmd_output.get_output() - self.pkt.update_pkt([]) + self.scapy_pkt_builder.update_pkt([]) return out def tear_down(self): - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): self.new_session.close() - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_fdir.py b/tests/TestSuite_ice_fdir.py index 60a41c6d..1aa57a43 100644 --- a/tests/TestSuite_ice_fdir.py +++ b/tests/TestSuite_ice_fdir.py @@ -9,8 +9,8 @@ import time import framework.utils as utils import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, skip_unsupported_pkg from framework.utils import GREEN, RED @@ -2686,7 +2686,7 @@ vectors_mac_ipv6_nat_t_esp = [ class TestICEFdir(TestCase): def query_count(self, hits_set, hits, port_id=0, rule_id=0): - out = self.dut.send_command( + out = self.sut_node.send_command( "flow query %s %s count" % (port_id, rule_id), timeout=1 ) p = re.compile("hits_set:\s(\d+).*hits:\s(\d+)", re.DOTALL) @@ -2777,8 +2777,8 @@ class TestICEFdir(TestCase): self.logger.info((GREEN("case passed: %s" % tv["name"]))) except Exception as e: self.logger.warning((RED(e))) - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("flow flush 1", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 1", timeout=1) test_results[tv["name"]] = False self.logger.info((GREEN("case failed: %s" % 
tv["name"]))) continue @@ -2826,27 +2826,27 @@ class TestICEFdir(TestCase): prerequisites. """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.portMask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.dut_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.dut_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.dut_port0) - self.tester_iface1 = self.tester.get_interface(self.dut_port1) - self.pci0 = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pci1 = self.dut.ports_info[self.dut_ports[1]]["pci"] - - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) - pf_pci = [self.dut.ports_info[0]["pci"]] + self.portMask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.sut_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.sut_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.sut_port0) + self.tg_iface1 = self.tg_node.get_interface(self.sut_port1) + self.pci0 = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pci1 = self.sut_node.ports_info[self.sut_ports[1]]["pci"] + + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) + pf_pci = [self.sut_node.ports_info[0]["pci"]] out = self.pmd_output.start_testpmd( "default", ports=pf_pci, eal_param="--log-level=ice,7" ) - 
self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.max_rule_num = self.pmd_output.get_max_rule_number(self, out) self.launch_testpmd_with_mark() @@ -2883,12 +2883,12 @@ class TestICEFdir(TestCase): self.config_testpmd() def send_packets(self, packets, tx_port=None, count=1): - self.pkt.update_pkt(packets) - tx_port = self.tester_iface0 if not tx_port else tx_port - self.pkt.send_pkt(crb=self.tester, tx_port=tx_port, count=count) + self.scapy_pkt_builder.update_pkt(packets) + tx_port = self.tg_iface0 if not tx_port else tx_port + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_port, count=count) def send_pkts_getouput(self, pkts, port_id=0, count=1, drop=False): - tx_port = self.tester_iface0 if port_id == 0 else self.tester_iface1 + tx_port = self.tg_iface0 if port_id == 0 else self.tg_iface1 time.sleep(1) if drop: @@ -3011,14 +3011,14 @@ class TestICEFdir(TestCase): rule_id = 0 if isinstance(rule_id, list): for i in rule_id: - out = self.dut.send_command( + out = self.sut_node.send_command( "flow destroy %s rule %s" % (port_id, i), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") m = p.search(out) self.verify(m, "flow rule %s delete failed" % rule_id) else: - out = self.dut.send_command( + out = self.sut_node.send_command( "flow destroy %s rule %s" % (port_id, rule_id), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") @@ -3266,7 +3266,7 @@ class TestICEFdir(TestCase): def test_invalid_parameters_of_queue_index(self): rule = "flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions queue index 64 / end" - out = self.dut.send_command(rule, timeout=1) + out = self.sut_node.send_command(rule, timeout=1) self.verify("error" in out, "failed with output: %s" % out) self.check_fdir_rule(port_id=0, stats=False) @@ -3283,16 +3283,16 @@ class TestICEFdir(TestCase): self.create_fdir_rule(rule3, check_stats=False, 
msg="error") try: # restart testpmd - self.dut.send_expect("quit", "# ") - self.dut.kill_all() + self.sut_node.send_expect("quit", "# ") + self.sut_node.kill_all() self.launch_testpmd_with_mark(rxq=7, txq=7) self.pmd_output.execute_cmd("start") rule4 = "flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 proto is 255 / end actions rss queues 0 1 2 3 4 5 6 7 end / end" self.create_fdir_rule(rule4, check_stats=False) self.check_fdir_rule(port_id=0, stats=False) # restart testpmd - self.dut.send_expect("quit", "# ") - self.dut.kill_all() + self.sut_node.send_expect("quit", "# ") + self.sut_node.kill_all() self.launch_testpmd_with_mark(rxq=8, txq=8) self.pmd_output.execute_cmd("start") self.create_fdir_rule(rule4, check_stats=True) @@ -3313,7 +3313,7 @@ class TestICEFdir(TestCase): except Exception as e: raise Exception(e) finally: - self.dut.kill_all() + self.sut_node.kill_all() self.launch_testpmd_with_mark() def test_invalid_parameters_of_input_set(self): @@ -3590,7 +3590,7 @@ class TestICEFdir(TestCase): self.query_count(1, 10, 0, 7) self.check_fdir_rule(0, stats=True, rule_list=res) - self.dut.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) self.check_fdir_rule(stats=False) self.send_pkts_getouput(pkts=[pkt1, pkt2, pkt3, pkt4, pkt5, pkt6, pkt7, pkt8]) @@ -3661,7 +3661,7 @@ class TestICEFdir(TestCase): self.query_count(1, 10, port_id=0, rule_id=i) self.query_count(1, 10, port_id=1, rule_id=0) self.check_fdir_rule(port_id=0, stats=True, rule_list=res[:-1]) - self.dut.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) self.check_fdir_rule(stats=False) self.send_pkts_getouput(pkts=[pkt1, pkt2, pkt3, pkt4, pkt5, pkt6, pkt7, pkt8]) @@ -3685,12 +3685,12 @@ class TestICEFdir(TestCase): rfc.check_drop(out, pkt_num=10, check_param={"port_id": 0}, stats=True) self.query_count(1, 10, port_id=0, rule_id=255) 
self.check_fdir_rule(0, stats=True, rule_list=res) - self.dut.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) out = self.send_pkts_getouput(pkt, count=10, drop=True) rfc.check_drop(out, pkt_num=10, check_param={"port_id": 0}, stats=False) self.check_fdir_rule(stats=False) - self.dut.send_command("stop", timeout=2) - self.dut.send_command("quit", timeout=2) + self.sut_node.send_command("stop", timeout=2) + self.sut_node.send_command("quit", timeout=2) self.launch_testpmd_with_mark() @skip_unsupported_pkg("os default") @@ -4026,8 +4026,8 @@ class TestICEFdir(TestCase): with open(cmd_path, "w") as f: f.writelines(cmd_li) self.pmd_output.execute_cmd("stop") - self.dut.send_command("quit", timeout=2) - self.dut.session.copy_file_to(cmd_path, cmd_path) + self.sut_node.send_command("quit", timeout=2) + self.sut_node.session.copy_file_to(cmd_path, cmd_path) try: out = self.pmd_output.start_testpmd( cores="1S/4C/1T", @@ -4066,7 +4066,7 @@ class TestICEFdir(TestCase): except Exception as e: raise Exception(e) finally: - self.dut.kill_all() + self.sut_node.kill_all() self.launch_testpmd_with_mark() def test_mac_ipv4_pay(self): @@ -4164,10 +4164,10 @@ class TestICEFdir(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("flow flush 1", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 1", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_flow_priority.py b/tests/TestSuite_ice_flow_priority.py index 4f63f06d..37587df1 100644 --- a/tests/TestSuite_ice_flow_priority.py +++ b/tests/TestSuite_ice_flow_priority.py @@ -7,8 +7,8 @@ import json import re import time -from 
framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from .rte_flow_common import FdirProcessing, check_drop @@ -402,19 +402,19 @@ class ICEPFFlowPriorityTest(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_iface1 = self.tester.get_interface(self.tester_port0) - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pkt = Packet() - self.pmdout = PmdOutput(self.dut) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port0) + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmdout = PmdOutput(self.sut_node) self.rxq = 16 self.process = FdirProcessing( - self, self.pmdout, [self.tester_iface0, self.tester_iface1], self.rxq + self, self.pmdout, [self.tg_iface0, self.tg_iface1], self.rxq ) def set_up(self): @@ -425,10 +425,10 @@ class ICEPFFlowPriorityTest(TestCase): self.launch_testpmd() def reload_ice(self): - self.dut.bind_interfaces_linux("ice") - self.dut.send_expect("rmmod ice", "#", 120) - self.dut.send_expect("modprobe ice", "#", 120) - self.dut.bind_interfaces_linux("vfio-pci") + self.sut_node.bind_interfaces_linux("ice") + 
self.sut_node.send_expect("rmmod ice", "#", 120) + self.sut_node.send_expect("modprobe ice", "#", 120) + self.sut_node.bind_interfaces_linux("vfio-pci") def launch_testpmd(self, eal_param=False): """ @@ -662,4 +662,4 @@ class ICEPFFlowPriorityTest(TestCase): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_iavf_fdir.py b/tests/TestSuite_ice_iavf_fdir.py index 2ff254fe..0605b3e2 100644 --- a/tests/TestSuite_ice_iavf_fdir.py +++ b/tests/TestSuite_ice_iavf_fdir.py @@ -8,8 +8,8 @@ from multiprocessing import Manager, Process import framework.utils as utils import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, skip_unsupported_pkg from framework.utils import GREEN, RED @@ -8261,7 +8261,7 @@ class TestICEIAVFFdir(TestCase): for tv in vectors: try: port_id = tv["check_param"]["port_id"] - self.dut.send_expect("flow flush %d" % port_id, "testpmd> ", 120) + self.sut_node.send_expect("flow flush %d" % port_id, "testpmd> ", 120) # validate rule self.validate_fdir_rule(tv["rule"], check_stats=True) @@ -8369,44 +8369,44 @@ class TestICEIAVFFdir(TestCase): prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - localPort0 = self.tester.get_local_port(self.dut_ports[0]) - localPort1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(localPort0) - self.tester_iface1 = self.tester.get_interface(localPort1) - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf1_intf = self.dut.ports_info[self.dut_ports[1]]["intf"] - self.pf0_mac = self.dut.get_mac_address(0) - self.pf1_mac = self.dut.get_mac_address(1) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + localPort0 = self.tg_node.get_local_port(self.sut_ports[0]) + localPort1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(localPort0) + self.tg_iface1 = self.tg_node.get_interface(localPort1) + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf1_intf = self.sut_node.ports_info[self.sut_ports[1]]["intf"] + self.pf0_mac = self.sut_node.get_mac_address(0) + self.pf1_mac = self.sut_node.get_mac_address(1) # bind pf to kernel - for port in self.dut_ports: - netdev = self.dut.ports_info[port]["port"] + for port in self.sut_ports: + netdev = self.sut_node.ports_info[port]["port"] netdev.bind_driver(driver="ice") # set vf driver self.vf_driver = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.suite_config = rfc.get_suite_config(self) - self.pkt = 
Packet() - self.pmd_output = PmdOutput(self.dut) - self.path = self.dut.apps_name["test-pmd"] + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) + self.path = self.sut_node.apps_name["test-pmd"] self.src_file_dir = "dep/" - self.dut_file_dir = "/tmp/" + self.sut_file_dir = "/tmp/" self.q_num = TXQ_RXQ_NUMBER def set_up(self): """ Run before each test case. """ - self.dut.restore_interfaces_linux() + self.sut_node.restore_interfaces_linux() self.setup_2pf_4vf_env() time.sleep(1) self.launch_testpmd() @@ -8414,25 +8414,25 @@ class TestICEIAVFFdir(TestCase): def setup_2pf_4vf_env(self, driver="default"): # get PF interface name - self.used_dut_port_0 = self.dut_ports[0] - self.used_dut_port_1 = self.dut_ports[1] + self.used_sut_port_0 = self.sut_ports[0] + self.used_sut_port_1 = self.sut_ports[1] # generate 2 VFs on PF - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 2, driver=driver) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 2, driver=driver) - self.sriov_vfs_pf0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] - self.sriov_vfs_pf1 = self.dut.ports_info[self.used_dut_port_1]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 2, driver=driver) + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_1, 2, driver=driver) + self.sriov_vfs_pf0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + self.sriov_vfs_pf1 = self.sut_node.ports_info[self.used_sut_port_1]["vfs_port"] - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac 00:11:22:33:44:55" % self.pf0_intf, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 1 mac 00:11:22:33:44:66" % self.pf0_intf, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac 00:11:22:33:44:77" % self.pf1_intf, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 1 mac 00:11:22:33:44:88" % self.pf1_intf, "#" ) @@ 
-8446,37 +8446,37 @@ class TestICEIAVFFdir(TestCase): except Exception as e: self.destroy_env() raise Exception(e) - out = self.dut.send_expect("./usertools/dpdk-devbind.py -s", "#") + out = self.sut_node.send_expect("./usertools/dpdk-devbind.py -s", "#") print(out) def setup_npf_nvf_env(self, pf_num=2, vf_num=2, driver="default"): # get PF interface name - self.used_dut_port_0 = self.dut_ports[0] - self.used_dut_port_1 = self.dut_ports[1] + self.used_sut_port_0 = self.sut_ports[0] + self.used_sut_port_1 = self.sut_ports[1] try: # generate vf on pf if pf_num == 1: - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port_0, vf_num, driver=driver + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port_0, vf_num, driver=driver ) - self.sriov_vfs_pf0 = self.dut.ports_info[self.used_dut_port_0][ + self.sriov_vfs_pf0 = self.sut_node.ports_info[self.used_sut_port_0][ "vfs_port" ] # bind VF0 and VF1 to dpdk driver for vf_port in self.sriov_vfs_pf0: vf_port.bind_driver(self.vf_driver) else: - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port_0, vf_num, driver=driver + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port_0, vf_num, driver=driver ) - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port_1, vf_num, driver=driver + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port_1, vf_num, driver=driver ) - self.sriov_vfs_pf0 = self.dut.ports_info[self.used_dut_port_0][ + self.sriov_vfs_pf0 = self.sut_node.ports_info[self.used_sut_port_0][ "vfs_port" ] - self.sriov_vfs_pf1 = self.dut.ports_info[self.used_dut_port_1][ + self.sriov_vfs_pf1 = self.sut_node.ports_info[self.used_sut_port_1][ "vfs_port" ] for vf_port in self.sriov_vfs_pf0: @@ -8487,17 +8487,17 @@ class TestICEIAVFFdir(TestCase): except Exception as e: self.destroy_env() raise Exception(e) - out = self.dut.send_expect("./usertools/dpdk-devbind.py -s", "#") + out = self.sut_node.send_expect("./usertools/dpdk-devbind.py -s", "#") print(out) def destroy_env(self): """ This 
is to stop testpmd and destroy 1pf and 2vfs environment. """ - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) time.sleep(2) - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[0]) - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[1]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[0]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[1]) def config_testpmd(self): self.pmd_output.execute_cmd("set fwd rxonly") @@ -8526,9 +8526,9 @@ class TestICEIAVFFdir(TestCase): self.config_testpmd() def send_packets(self, packets, pf_id=0, count=1): - self.pkt.update_pkt(packets) - tx_port = self.tester_iface0 if pf_id == 0 else self.tester_iface1 - self.pkt.send_pkt(crb=self.tester, tx_port=tx_port, count=count) + self.scapy_pkt_builder.update_pkt(packets) + tx_port = self.tg_iface0 if pf_id == 0 else self.tg_iface1 + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_port, count=count) def send_pkts_getouput(self, pkts, pf_id=0, count=1): """ @@ -8537,7 +8537,7 @@ class TestICEIAVFFdir(TestCase): """ self.send_packets(pkts, pf_id, count) time.sleep(1) - out_info = self.dut.get_session_output(timeout=1) + out_info = self.sut_node.get_session_output(timeout=1) out_pkt = self.pmd_output.execute_cmd("stop") out = out_info + out_pkt self.pmd_output.execute_cmd("start") @@ -8643,7 +8643,7 @@ class TestICEIAVFFdir(TestCase): self.verify(not p.search(out), "flow rule on port %s is existed" % port_id) def check_rule_number(self, port_id=0, num=0): - out = self.dut.send_command("flow list %s" % port_id, timeout=30) + out = self.sut_node.send_command("flow list %s" % port_id, timeout=30) result_scanner = r"\d*.*?\d*.*?\d*.*?=>*" scanner = re.compile(result_scanner, re.DOTALL) li = scanner.findall(out) @@ -9369,8 +9369,8 @@ class TestICEIAVFFdir(TestCase): """ create same rules on pf and vf, no conflict """ - self.session_secondary = self.dut.new_session() - self.session_third = self.dut.new_session() + 
self.session_secondary = self.sut_node.new_session() + self.session_third = self.sut_node.new_session() rules = [ "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions queue index 1 / end", @@ -9396,12 +9396,12 @@ class TestICEIAVFFdir(TestCase): % self.pf1_mac, ], } - out_pf0 = self.dut.send_expect( + out_pf0 = self.sut_node.send_expect( "ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 1" % self.pf0_intf, "# ", ) - out_pf1 = self.dut.send_expect( + out_pf1 = self.sut_node.send_expect( "ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 1" % self.pf1_intf, "# ", @@ -9419,7 +9419,7 @@ class TestICEIAVFFdir(TestCase): + eal_param + " -- -i --rxq=%s --txq=%s" % (self.q_num, self.q_num) ) - self.dut.send_expect(command, "testpmd> ", 300) + self.sut_node.send_expect(command, "testpmd> ", 300) self.config_testpmd() eal_param = "-c 0xf0 -n 6 -a %s -a %s --file-prefix=pf1" % ( @@ -9462,13 +9462,13 @@ class TestICEIAVFFdir(TestCase): time.sleep(1) # send matched packets - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tg_iface0) ) - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tg_iface1) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(1) out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ") self.verify( @@ -9537,8 +9537,8 @@ class TestICEIAVFFdir(TestCase): ) # flush all the rules - self.dut.send_expect("flow flush 0", "testpmd> ") - self.dut.send_expect("flow flush 1", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 1", "testpmd> ") 
self.session_secondary.send_expect("flow flush 0", "testpmd> ") self.session_secondary.send_expect("flow flush 1", "testpmd> ") @@ -9556,13 +9556,13 @@ class TestICEIAVFFdir(TestCase): ) # send matched packets - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tg_iface0) ) - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tg_iface1) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ") self.verify( @@ -9602,15 +9602,15 @@ class TestICEIAVFFdir(TestCase): out_vf11, pkt_num=1, check_param={"port_id": 1, "queue": 1}, stats=False ) - self.dut.close_session(self.session_secondary) - self.dut.close_session(self.session_third) + self.sut_node.close_session(self.session_secondary) + self.sut_node.close_session(self.session_third) def test_create_same_input_diff_action_on_pf_vf(self): """ create same input set but different action rules on pf and vf, no conflict. 
""" - self.session_secondary = self.dut.new_session() - self.session_third = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() + self.session_third = self.sut_node.new_session() rules = [ "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions queue index 1 / mark id 1 / end", @@ -9638,12 +9638,12 @@ class TestICEIAVFFdir(TestCase): % self.pf1_mac, ], } - out_pf0 = self.dut.send_expect( + out_pf0 = self.sut_node.send_expect( "ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 1" % self.pf0_intf, "# ", ) - out_pf1 = self.dut.send_expect( + out_pf1 = self.sut_node.send_expect( "ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 2" % self.pf1_intf, "# ", @@ -9661,7 +9661,7 @@ class TestICEIAVFFdir(TestCase): + eal_param + " -- -i --rxq=%s --txq=%s" % (self.q_num, self.q_num) ) - self.dut.send_expect(command, "testpmd> ", 300) + self.sut_node.send_expect(command, "testpmd> ", 300) self.config_testpmd() eal_param = "-c 0xf0 -n 6 -a %s -a %s --file-prefix=pf1" % ( @@ -9698,13 +9698,13 @@ class TestICEIAVFFdir(TestCase): time.sleep(1) # send matched packets - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tg_iface0) ) - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tg_iface1) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(1) out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ") self.verify( @@ -9791,8 +9791,8 @@ class TestICEIAVFFdir(TestCase): ) # flush all the rules - self.dut.send_expect("flow flush 0", "testpmd> ") - self.dut.send_expect("flow flush 1", "testpmd> ") + 
self.sut_node.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 1", "testpmd> ") self.session_secondary.send_expect("flow flush 0", "testpmd> ") self.session_secondary.send_expect("flow flush 1", "testpmd> ") @@ -9810,13 +9810,13 @@ class TestICEIAVFFdir(TestCase): ) # send matched packets - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tg_iface0) ) - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tg_iface1) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ") self.verify( @@ -9865,15 +9865,15 @@ class TestICEIAVFFdir(TestCase): stats=False, ) - self.dut.close_session(self.session_secondary) - self.dut.close_session(self.session_third) + self.sut_node.close_session(self.session_secondary) + self.sut_node.close_session(self.session_third) def test_create_diff_input_diff_action_on_pf_vf(self): """ create different rules on pf and vf """ - self.session_secondary = self.dut.new_session() - self.session_third = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() + self.session_third = self.sut_node.new_session() rules = [ "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions rss queues 2 3 end / end", @@ -9901,12 +9901,12 @@ class TestICEIAVFFdir(TestCase): % self.pf1_mac, ], } - out_pf0 = self.dut.send_expect( + out_pf0 = self.sut_node.send_expect( "ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 1" % self.pf0_intf, "# ", ) - out_pf1 = self.dut.send_expect( + out_pf1 = self.sut_node.send_expect( "ethtool -N %s flow-type udp4 src-ip 192.168.0.22 dst-ip 
192.168.0.23 src-port 22 dst-port 23 action -1" % self.pf1_intf, "# ", @@ -9924,7 +9924,7 @@ class TestICEIAVFFdir(TestCase): + eal_param + " -- -i --rxq=%s --txq=%s" % (self.q_num, self.q_num) ) - self.dut.send_expect(command, "testpmd> ", 300) + self.sut_node.send_expect(command, "testpmd> ", 300) self.config_testpmd() eal_param = "-c 0xf0 -n 6 -a %s -a %s --file-prefix=pf1" % ( @@ -9961,13 +9961,13 @@ class TestICEIAVFFdir(TestCase): time.sleep(1) # send matched packets - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tg_iface0) ) - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tg_iface1) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(1) out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ") @@ -10049,8 +10049,8 @@ class TestICEIAVFFdir(TestCase): ) # flush all the rules - self.dut.send_expect("flow flush 0", "testpmd> ") - self.dut.send_expect("flow flush 1", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 1", "testpmd> ") self.session_secondary.send_expect("flow flush 0", "testpmd> ") self.session_secondary.send_expect("flow flush 1", "testpmd> ") @@ -10068,13 +10068,13 @@ class TestICEIAVFFdir(TestCase): ) # send matched packets - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][0], self.tg_iface0) ) - self.tester.scapy_append( - 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1) + self.tg_node.scapy_append( + 'sendp([%s], iface="%s")' % (pkts["pf"][1], self.tg_iface1) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() out_pf0 = 
self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ") self.verify("rx_queue_1_packets: 1" in out_pf0, "the rule is not destroyed") @@ -10118,8 +10118,8 @@ class TestICEIAVFFdir(TestCase): out_vf11, pkt_num=1, check_param={"port_id": 1, "drop": 1}, stats=False ) - self.dut.close_session(self.session_secondary) - self.dut.close_session(self.session_third) + self.sut_node.close_session(self.session_secondary) + self.sut_node.close_session(self.session_third) @skip_unsupported_pkg(["os default", "wireless"]) def test_maxnum_128_profiles(self): @@ -10133,13 +10133,13 @@ class TestICEIAVFFdir(TestCase): self.destroy_env() self.setup_npf_nvf_env(pf_num=1, vf_num=16) - if len(self.dut_ports) == 4: + if len(self.sut_ports) == 4: nex_cnt = 94 // 8 - elif len(self.dut_ports) == 2: + elif len(self.sut_ports) == 2: nex_cnt = 110 // 8 # check the card is chapman beach 100g*2 or not - pf_pci = self.dut.ports_info[0]["pci"] - out = self.dut.send_expect( + pf_pci = self.sut_node.ports_info[0]["pci"] + out = self.sut_node.send_expect( 'lspci -s {} -vvv |grep "Product Name"'.format(pf_pci), "#" ) res = re.search(r"Network Adapter\s+(?PE810-.*)", out) @@ -10151,12 +10151,12 @@ class TestICEIAVFFdir(TestCase): else: self.verify(False, "The number of ports is not supported") - self.dut.send_expect( + self.sut_node.send_expect( "ip link set {} vf {} mac 00:11:22:33:44:55".format(self.pf0_intf, nex_cnt), "#", ) command = self.path + " -c f -n 6 -- -i --rxq=16 --txq=16" - self.dut.send_expect(command, "testpmd> ", 360) + self.sut_node.send_expect(command, "testpmd> ", 360) self.config_testpmd() for port_id in range(nex_cnt): @@ -10210,7 +10210,7 @@ class TestICEIAVFFdir(TestCase): stats=False, ) - self.dut.send_expect("flow flush {}".format(nex_cnt), "testpmd> ") + self.sut_node.send_expect("flow flush {}".format(nex_cnt), "testpmd> ") self.check_fdir_rule(port_id=(nex_cnt), stats=False) out = self.send_pkts_getouput(pkts=pkt1) rfc.check_iavf_fdir_mark( @@ -10243,10 
+10243,10 @@ class TestICEIAVFFdir(TestCase): check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True, ) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # show the rule list, there is no rule listed self.check_fdir_rule(port_id=0, stats=False) out = self.send_pkts_getouput(pkts=pkt) @@ -10297,7 +10297,7 @@ class TestICEIAVFFdir(TestCase): stats=True, ) - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ") out_0 = self.send_pkts_getouput(pkts=pkts[0]) rfc.check_iavf_fdir_mark( out_0, @@ -10319,11 +10319,11 @@ class TestICEIAVFFdir(TestCase): check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=True, ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") rule_li = self.create_fdir_rule(rules, check_stats=True) self.check_fdir_rule(port_id=0, rule_list=rule_li) - self.dut.send_expect("flow destroy 0 rule 1", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 1", "testpmd> ") out_0 = self.send_pkts_getouput(pkts=pkts[0]) rfc.check_iavf_fdir_mark( out_0, @@ -10345,11 +10345,11 @@ class TestICEIAVFFdir(TestCase): check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=True, ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") rule_li = self.create_fdir_rule(rules, check_stats=True) self.check_fdir_rule(port_id=0, rule_list=rule_li) - self.dut.send_expect("flow destroy 0 rule 2", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 2", "testpmd> ") out_0 = self.send_pkts_getouput(pkts=pkts[0]) 
rfc.check_iavf_fdir_mark( out_0, @@ -10371,7 +10371,7 @@ class TestICEIAVFFdir(TestCase): check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=False, ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") out_0 = self.send_pkts_getouput(pkts=pkts[0]) rfc.check_iavf_fdir_mark( @@ -10425,11 +10425,11 @@ class TestICEIAVFFdir(TestCase): stats=True, ) # reset vf - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port reset 0", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port reset 0", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # check there is not rule listed on port 0, the rule of port 1 is still be listed. self.check_fdir_rule(port_id=0, stats=False) self.check_fdir_rule(port_id=1, rule_list=["0"]) @@ -10456,7 +10456,7 @@ class TestICEIAVFFdir(TestCase): stats=True, ) # relaunch testpmd, and create the rules, check matched packets. 
- self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.launch_testpmd() self.create_fdir_rule(rules, check_stats=True) out0 = self.send_pkts_getouput(pkts=pkts[0]) @@ -10504,11 +10504,11 @@ class TestICEIAVFFdir(TestCase): stats=True, ) # reset vf - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop 1", "testpmd> ") - self.dut.send_expect("port reset 1", "testpmd> ") - self.dut.send_expect("port start 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop 1", "testpmd> ") + self.sut_node.send_expect("port reset 1", "testpmd> ") + self.sut_node.send_expect("port start 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # check the rule of port0 is still listed, check there is not rule listed on port 1. self.check_fdir_rule(port_id=0, rule_list=["0"]) self.check_fdir_rule(port_id=1, stats=False) @@ -10544,7 +10544,7 @@ class TestICEIAVFFdir(TestCase): stats=False, ) # relaunch testpmd, and create the rules, check matched packets. - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.launch_testpmd() self.create_fdir_rule(rules, check_stats=True) out0 = self.send_pkts_getouput(pkts=pkts[0]) @@ -10568,7 +10568,7 @@ class TestICEIAVFFdir(TestCase): then add a new rule which can take effect. relaunch testpmd, create same rules, can take effect. 
""" - self.session_secondary = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() rules = [ "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end", "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end", @@ -10599,13 +10599,13 @@ class TestICEIAVFFdir(TestCase): self.session_secondary.send_expect( "ip link set %s vf 0 mac 00:11:22:33:44:56" % self.pf0_intf, "# " ) - out = self.dut.session.get_session_before(timeout=2) + out = self.sut_node.session.get_session_before(timeout=2) self.verify("Port 0: reset event" in out, "failed to reset vf0") - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port reset 0", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port reset 0", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # check there is not rule listed on vf0 self.check_fdir_rule(0, stats=False) out0 = self.send_pkts_getouput(pkts=pkts[2]) @@ -10630,7 +10630,7 @@ class TestICEIAVFFdir(TestCase): stats=True, ) # relaunch testpmd, and create the rules, check matched packets. 
- self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.launch_testpmd() self.create_fdir_rule(rules, check_stats=True) out0 = self.send_pkts_getouput(pkts=pkts[2]) @@ -10647,11 +10647,11 @@ class TestICEIAVFFdir(TestCase): check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True, ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.session_secondary.send_expect( "ip link set %s vf 0 mac 00:11:22:33:44:55" % self.pf0_intf, "# " ) - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) def test_stress_pf_reset_vf_delete_rule(self): """ @@ -10659,7 +10659,7 @@ class TestICEIAVFFdir(TestCase): then delete the rule which can't take effect without core dump, relaunch testpmd, create same rules, can take effect. """ - self.session_secondary = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() rules = [ "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 6 / mark / end", "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 6 / mark / end", @@ -10688,13 +10688,13 @@ class TestICEIAVFFdir(TestCase): self.session_secondary.send_expect( "ip link set %s vf 1 mac 00:11:22:33:44:56" % self.pf0_intf, "# " ) - out = self.dut.session.get_session_before(timeout=2) + out = self.sut_node.session.get_session_before(timeout=2) self.verify("Port 1: reset event" in out, "failed to reset vf1") - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop 1", "testpmd> ") - self.dut.send_expect("port reset 1", "testpmd> ") - self.dut.send_expect("port start 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop 1", "testpmd> ") + self.sut_node.send_expect("port reset 1", 
"testpmd> ") + self.sut_node.send_expect("port start 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # check there is not rule listed on vf1 self.check_fdir_rule(1, stats=False) out0 = self.send_pkts_getouput(pkts=pkts[2]) @@ -10709,7 +10709,7 @@ class TestICEIAVFFdir(TestCase): stats=True, ) # delete the rules - self.dut.send_expect("flow destroy 0 rule 0", "Flow rule #0 destroyed") + self.sut_node.send_expect("flow destroy 0 rule 0", "Flow rule #0 destroyed") out0 = self.send_pkts_getouput(pkts=pkts[0]) rfc.check_iavf_fdir_mark( out0, @@ -10726,7 +10726,7 @@ class TestICEIAVFFdir(TestCase): ) # relaunch testpmd, and create the rules, check matched packets. - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.launch_testpmd() self.create_fdir_rule(rules, check_stats=True) out0 = self.send_pkts_getouput(pkts=pkts[0]) @@ -10743,22 +10743,22 @@ class TestICEIAVFFdir(TestCase): check_param={"port_id": 1, "mark_id": 0, "queue": 6}, stats=True, ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.session_secondary.send_expect( "ip link set %s vf 1 mac 00:11:22:33:44:66" % self.pf0_intf, "# " ) - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) def checksum_enablehw(self, port, hw): """ set checksum parameters """ - self.dut.send_expect("set fwd csum", "testpmd>") - self.dut.send_expect("port stop all", "testpmd>") - self.dut.send_expect("csum set ip %s %d" % (hw, port), "testpmd>") - self.dut.send_expect("csum set udp %s %d" % (hw, port), "testpmd>") - self.dut.send_expect("port start all", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd csum", "testpmd>") + self.sut_node.send_expect("port stop all", "testpmd>") + self.sut_node.send_expect("csum set ip %s %d" % (hw, port), "testpmd>") + self.sut_node.send_expect("csum set udp %s %d" % (hw, port), "testpmd>") + self.sut_node.send_expect("port 
start all", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") def get_chksum_values(self, packets_expected): """ @@ -10768,41 +10768,41 @@ class TestICEIAVFFdir(TestCase): chksum = dict() - self.tester.send_expect("scapy", ">>> ") - self.tester.send_expect("import sys", ">>> ") - self.tester.send_expect("sys.path.append('./dep')", ">>> ") - self.tester.send_expect("from pfcp import PFCP", ">>> ") + self.tg_node.send_expect("scapy", ">>> ") + self.tg_node.send_expect("import sys", ">>> ") + self.tg_node.send_expect("sys.path.append('./dep')", ">>> ") + self.tg_node.send_expect("from pfcp import PFCP", ">>> ") for packet_type in list(packets_expected.keys()): - self.tester.send_expect("p = %s" % packets_expected[packet_type], ">>>") - out = self.tester.send_command("p.show2()", timeout=1) + self.tg_node.send_expect("p = %s" % packets_expected[packet_type], ">>>") + out = self.tg_node.send_command("p.show2()", timeout=1) chksums = checksum_pattern.findall(out) chksum[packet_type] = chksums - self.tester.send_expect("exit()", "#") + self.tg_node.send_expect("exit()", "#") return chksum def checksum_validate(self, packets_sent, packets_expected): """ Validate the checksum. 
""" - tx_interface = self.tester_iface0 - rx_interface = self.tester_iface0 + tx_interface = self.tg_iface0 + rx_interface = self.tg_iface0 sniff_src = "00:11:22:33:44:55" result = dict() - pkt = Packet() + scapy_pkt_builder = ScapyPacketBuilder() chksum = self.get_chksum_values(packets_expected) - self.inst = self.tester.tcpdump_sniff_packets( + self.inst = self.tg_node.tcpdump_sniff_packets( intf=rx_interface, count=len(packets_sent), filters=[{"layer": "ether", "config": {"src": sniff_src}}], ) for packet_type in list(packets_sent.keys()): - pkt.append_pkt(packets_sent[packet_type]) - pkt.send_pkt(crb=self.tester, tx_port=tx_interface, count=1) + scapy_pkt_builder.append_pkt(packets_sent[packet_type]) + scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_interface, count=1) - p = self.tester.load_tcpdump_sniff_packets(self.inst) + p = self.tg_node.load_tcpdump_sniff_packets(self.inst) nr_packets = len(p) print(p) packets_received = [ @@ -10844,16 +10844,16 @@ class TestICEIAVFFdir(TestCase): """ set rx_vlan and tx_vlan """ - self.dut.send_expect("vlan set filter on %d" % port, "testpmd> ", 20) - self.dut.send_expect("vlan set strip %s %d" % (strip, port), "testpmd> ", 20) - self.dut.send_expect("rx_vlan add %d %d" % (vlan, port), "testpmd> ", 20) - self.dut.send_expect("set verbose 1", "testpmd> ", 20) + self.sut_node.send_expect("vlan set filter on %d" % port, "testpmd> ", 20) + self.sut_node.send_expect("vlan set strip %s %d" % (strip, port), "testpmd> ", 20) + self.sut_node.send_expect("rx_vlan add %d %d" % (vlan, port), "testpmd> ", 20) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 20) if rx_tx == "tx": - self.dut.send_expect("port stop %d" % port, "testpmd> ", 20) - self.dut.send_expect("tx_vlan set %d %d" % (port, vlan), "testpmd> ", 20) - self.dut.send_expect("port start %d" % port, "testpmd> ", 20) - self.dut.send_expect("set fwd mac", "testpmd> ", 20) + self.sut_node.send_expect("port stop %d" % port, "testpmd> ", 20) + 
self.sut_node.send_expect("tx_vlan set %d %d" % (port, vlan), "testpmd> ", 20) + self.sut_node.send_expect("port start %d" % port, "testpmd> ", 20) + self.sut_node.send_expect("set fwd mac", "testpmd> ", 20) def get_tcpdump_package(self, pkts): """ @@ -10884,7 +10884,7 @@ class TestICEIAVFFdir(TestCase): "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions queue index 4 / mark id 4 / end", ] - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.pmd_output.start_testpmd( cores="1S/4C/1T", param="--rxq={} --txq={} --enable-rx-cksum --port-topology=loop".format( @@ -10924,7 +10924,7 @@ class TestICEIAVFFdir(TestCase): self.checksum_enablehw(port=0, hw="hw") self.set_vlan(vlan=vlan, port=0, strip="on") - out_info = self.dut.send_expect("show port info 0", "testpmd> ", 20) + out_info = self.sut_node.send_expect("show port info 0", "testpmd> ", 20) self.verify("strip on" in out_info, "Wrong strip:" + out_info) # send packets and check the checksum value @@ -10935,12 +10935,12 @@ class TestICEIAVFFdir(TestCase): self.verify(vlan not in out_dump, "Wrong vlan:" + str(out_dump)) # Validate checksum on the receive packet - out_testpmd = self.dut.send_expect("stop", "testpmd> ") + out_testpmd = self.sut_node.send_expect("stop", "testpmd> ") bad_ipcsum = self.pmd_output.get_pmd_value("Bad-ipcsum:", out_testpmd) bad_l4csum = self.pmd_output.get_pmd_value("Bad-l4csum:", out_testpmd) self.verify(bad_ipcsum == 2, "Bad-ipcsum check error") self.verify(bad_l4csum == 4, "Bad-l4csum check error") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # check fdir rule take effect self.create_fdir_rule(rules, check_stats=True) @@ -10974,7 +10974,7 @@ class TestICEIAVFFdir(TestCase): ) # destroy the rules and check there is no rule listed. 
- self.dut.send_expect("flow flush 0", "testpmd> ", 20) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 20) self.check_fdir_rule(port_id=0, stats=False) # check no rules existing @@ -11015,7 +11015,7 @@ class TestICEIAVFFdir(TestCase): self.verify(vlan not in out_dump, "Wrong vlan:" + str(out_dump)) # Validate checksum on the receive packet - out_testpmd = self.dut.send_expect("stop", "testpmd> ") + out_testpmd = self.sut_node.send_expect("stop", "testpmd> ") bad_ipcsum = self.pmd_output.get_pmd_value("Bad-ipcsum:", out_testpmd) bad_l4csum = self.pmd_output.get_pmd_value("Bad-l4csum:", out_testpmd) self.verify(bad_ipcsum == 2, "Bad-ipcsum check error") @@ -11039,7 +11039,7 @@ class TestICEIAVFFdir(TestCase): "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions queue index 4 / mark id 4 / end", ] - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.pmd_output.start_testpmd( cores="1S/4C/1T", param="--rxq={} --txq={} --enable-rx-cksum --port-topology=loop".format( @@ -11079,7 +11079,7 @@ class TestICEIAVFFdir(TestCase): self.checksum_enablehw(port=0, hw="sw") self.set_vlan(vlan=vlan, port=0, strip="off") - out_info = self.dut.send_expect("show port info 0", "testpmd> ", 20) + out_info = self.sut_node.send_expect("show port info 0", "testpmd> ", 20) self.verify("strip off" in out_info, "Wrong strip:" + out_info) result = self.checksum_validate(pkts_sent, pkts_ref) @@ -11089,12 +11089,12 @@ class TestICEIAVFFdir(TestCase): self.verify(vlan in out_dump, "Wrong vlan:" + str(out_dump)) # Validate checksum on the receive packet - out_testpmd = self.dut.send_expect("stop", "testpmd> ") + out_testpmd = self.sut_node.send_expect("stop", "testpmd> ") bad_ipcsum = self.pmd_output.get_pmd_value("Bad-ipcsum:", out_testpmd) bad_l4csum = self.pmd_output.get_pmd_value("Bad-l4csum:", out_testpmd) self.verify(bad_ipcsum == 2, "Bad-ipcsum check error") self.verify(bad_l4csum == 4, "Bad-l4csum check error") - 
self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # check fdir rule take effect self.create_fdir_rule(rules, check_stats=True) @@ -11128,7 +11128,7 @@ class TestICEIAVFFdir(TestCase): ) # destroy the rules and check there is no rule listed. - self.dut.send_expect("flow flush 0", "testpmd> ", 20) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 20) self.check_fdir_rule(port_id=0, stats=False) # check no rules existing @@ -11168,7 +11168,7 @@ class TestICEIAVFFdir(TestCase): self.verify(vlan in out_dump, "Wrong vlan:" + str(out_dump)) # Validate checksum on the receive packet - out_testpmd = self.dut.send_expect("stop", "testpmd> ") + out_testpmd = self.sut_node.send_expect("stop", "testpmd> ") bad_ipcsum = self.pmd_output.get_pmd_value("Bad-ipcsum:", out_testpmd) bad_l4csum = self.pmd_output.get_pmd_value("Bad-l4csum:", out_testpmd) self.verify(bad_ipcsum == 2, "Bad-ipcsum check error") @@ -11187,7 +11187,7 @@ class TestICEIAVFFdir(TestCase): "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions queue index 4 / mark id 4 / end", ] - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.pmd_output.start_testpmd( cores="1S/4C/1T", param="--rxq={} --txq={} --enable-rx-cksum --port-topology=loop".format( @@ -11200,7 +11200,7 @@ class TestICEIAVFFdir(TestCase): mac = "00:11:22:33:44:55" sndIP = "10.0.0.1" sndIPv6 = "::1" - pkt = Packet() + scapy_pkt_builder = ScapyPacketBuilder() pkts_sent = { "IP/UDP/PFCP_NODE": 'Ether(dst="%s", src="52:00:00:00:00:00")/IP(src="%s")/UDP(sport=22, dport=8805)/PFCP(S=0)/("X"*46)' % (mac, sndIP), @@ -11213,25 +11213,25 @@ class TestICEIAVFFdir(TestCase): } self.set_vlan(vlan=vlan, port=0, strip="off", rx_tx="tx") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") - tx_interface = self.tester_iface0 - rx_interface = self.tester_iface0 + tx_interface = self.tg_iface0 + rx_interface = 
self.tg_iface0 dmac = "00:11:22:33:44:55" smac = self.pf1_mac - inst = self.tester.tcpdump_sniff_packets(rx_interface) + inst = self.tg_node.tcpdump_sniff_packets(rx_interface) for packet_type in list(pkts_sent.keys()): - pkt.append_pkt(pkts_sent[packet_type]) - pkt.send_pkt(crb=self.tester, tx_port=tx_interface, count=1) + scapy_pkt_builder.append_pkt(pkts_sent[packet_type]) + scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_interface, count=1) - p = self.tester.load_tcpdump_sniff_packets(inst) + p = self.tg_node.load_tcpdump_sniff_packets(inst) out = self.get_tcpdump_package(p) self.verify(vlan in out, "Vlan not found:" + str(out)) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # check fdir rule take effect self.create_fdir_rule(rules, check_stats=True) @@ -11265,7 +11265,7 @@ class TestICEIAVFFdir(TestCase): ) # destroy the rules and check there is no rule listed. 
- self.dut.send_expect("flow flush 0", "testpmd> ", 20) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 20) self.check_fdir_rule(port_id=0, stats=False) # check no rules existing @@ -11298,11 +11298,11 @@ class TestICEIAVFFdir(TestCase): stats=False, ) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan reset 0", "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("stop", "testpmd> ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan reset 0", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ", 30) def test_check_profile_delete(self): pkt_ipv4_pay_ipv6_pay = [ @@ -11601,13 +11601,13 @@ class TestICEIAVFFdir(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_env() if getattr(self, "session_secondary", None): - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) if getattr(self, "session_third", None): - self.dut.close_session(self.session_third) + self.sut_node.close_session(self.session_third) def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_env() diff --git a/tests/TestSuite_ice_iavf_fdir_gtpogre.py b/tests/TestSuite_ice_iavf_fdir_gtpogre.py index 7893faa5..93b4ef47 100644 --- a/tests/TestSuite_ice_iavf_fdir_gtpogre.py +++ b/tests/TestSuite_ice_iavf_fdir_gtpogre.py @@ -8,8 +8,8 @@ import time from scapy.all import * import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.utils import GREEN, RED @@ -2230,32 +2230,32 @@ 
tvs_outer_mac_ipv6_gre_ipv6_gtpu_dl = [ class TestICEIavfGTPoGREFDIR(TestCase): def set_up_all(self): - self.ports = self.dut.get_ports(self.nic) + self.ports = self.sut_node.get_ports(self.nic) # init pkt - self.pkt = Packet() + self.scapy_pkt_builder = ScapyPacketBuilder() # set default app parameter - self.pmd_out = PmdOutput(self.dut) - self.tester_mac = self.tester.get_mac(0) - self.tester_port0 = self.tester.get_local_port(self.ports[0]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) + self.pmd_out = PmdOutput(self.sut_node) + self.tg_mac = self.tg_node.get_mac(0) + self.tg_port0 = self.tg_node.get_local_port(self.ports[0]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) - self.tester.send_expect("ifconfig {} up".format(self.tester_iface0), "# ") + self.tg_node.send_expect("ifconfig {} up".format(self.tg_iface0), "# ") self.param = "--rxq={} --txq={} --disable-rss --txd=384 --rxd=384".format( LAUNCH_QUEUE, LAUNCH_QUEUE ) self.param_fdir = "--rxq={} --txq={}".format(LAUNCH_QUEUE, LAUNCH_QUEUE) self.vf_flag = False - self.cores = self.dut.get_core_list("1S/4C/1T") + self.cores = self.sut_node.get_core_list("1S/4C/1T") self.setup_1pf_vfs_env() - self.ports_pci = [self.dut.ports_info[self.ports[0]]["pci"]] + self.ports_pci = [self.sut_node.ports_info[self.ports[0]]["pci"]] self.rxq = 16 self.fdirprocess = rfc.FdirProcessing( - self, self.pmd_out, [self.tester_iface0], LAUNCH_QUEUE, ipfrag_flag=False + self, self.pmd_out, [self.tg_iface0], LAUNCH_QUEUE, ipfrag_flag=False ) self.rssprocess = rfc.RssProcessing( - self, self.pmd_out, [self.tester_iface0], self.rxq + self, self.pmd_out, [self.tg_iface0], self.rxq ) def set_up(self): @@ -2266,14 +2266,14 @@ class TestICEIavfGTPoGREFDIR(TestCase): create vf and set vf mac """ self.vf_flag = True - self.dut.bind_interfaces_linux("ice") - self.pf_interface = self.dut.ports_info[0]["intf"] - self.dut.send_expect("ifconfig {} up".format(self.pf_interface), "# ") - 
self.dut.generate_sriov_vfs_by_port(self.ports[0], 1, driver=self.kdriver) - self.dut.send_expect( + self.sut_node.bind_interfaces_linux("ice") + self.pf_interface = self.sut_node.ports_info[0]["intf"] + self.sut_node.send_expect("ifconfig {} up".format(self.pf_interface), "# ") + self.sut_node.generate_sriov_vfs_by_port(self.ports[0], 1, driver=self.kdriver) + self.sut_node.send_expect( "ip link set {} vf 0 mac 00:11:22:33:44:55".format(self.pf_interface), "# " ) - self.vf_port = self.dut.ports_info[0]["vfs_port"] + self.vf_port = self.sut_node.ports_info[0]["vfs_port"] self.verify(len(self.vf_port) != 0, "VF create failed") self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: @@ -2296,9 +2296,9 @@ class TestICEIavfGTPoGREFDIR(TestCase): self.pmd_out.start_testpmd( cores=self.cores, ports=self.vf_ports_pci, param=self.param ) - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") def destroy_testpmd_and_vf(self): """ @@ -2306,15 +2306,15 @@ class TestICEIavfGTPoGREFDIR(TestCase): if vf testpmd, destroy the vfs and set vf_flag = false """ for port_id in self.ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + self.sut_node.destroy_sriov_vfs_by_port(port_id) def tear_down(self): - self.dut.send_expect("quit", "# ") - self.dut.kill_all() + self.sut_node.send_expect("quit", "# ") + self.sut_node.kill_all() def tear_down_all(self): self.destroy_testpmd_and_vf() - self.dut.kill_all() + self.sut_node.kill_all() def test_mac_ipv4_gre_ipv4_gtpu_ipv4(self): self.launch_testpmd(param_fdir=True) @@ -2781,7 +2781,7 @@ class TestICEIavfGTPoGREFDIR(TestCase): result = False self.logger.error("Error: queue index {} != 14".format(queue)) continue - self.dut.send_expect("flow destroy 0 rule 1", 
"testpmd>") + self.sut_node.send_expect("flow destroy 0 rule 1", "testpmd>") hashes, queues = self.rssprocess.send_pkt_get_hash_queues(pkts=pkt) for queue in queues: if "0xd" != queue: @@ -2789,7 +2789,7 @@ class TestICEIavfGTPoGREFDIR(TestCase): self.logger.error("Error: queue index {} != 13".format(queue)) continue result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.logger.info("Subcase 2: rule with eh and rule without eh") @@ -2823,7 +2823,7 @@ class TestICEIavfGTPoGREFDIR(TestCase): self.logger.error("Error: queue index {} != 3".format(queue)) continue result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.logger.info("Subcase 3: rule with l4 and rule without l4") @@ -2857,7 +2857,7 @@ class TestICEIavfGTPoGREFDIR(TestCase): self.logger.error("Error: queue index {} != 3".format(queue)) continue result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.logger.info("Subcase 4: rule with ul and rule without ul/dl") @@ -2880,7 +2880,7 @@ class TestICEIavfGTPoGREFDIR(TestCase): result = False self.logger.error("Error: queue index {} != 3".format(queue)) continue - self.dut.send_expect("flow destroy 0 rule 1", "testpmd>") + self.sut_node.send_expect("flow destroy 0 rule 1", "testpmd>") hashes, queues = self.rssprocess.send_pkt_get_hash_queues(pkts=pkt) for queue in queues: if "0xd" != queue: @@ -2888,7 +2888,7 @@ class TestICEIavfGTPoGREFDIR(TestCase): self.logger.error("Error: queue index {} != 13".format(queue)) continue result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) 
self.logger.info("Subcase 5: ipv4/ipv4/ipv4 rule and ipv4/ipv6/ipv4 rule") @@ -2903,7 +2903,7 @@ class TestICEIavfGTPoGREFDIR(TestCase): self.logger.warning("Subcase 5 failed: %s" % e) result = False result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) for i in result_list: self.verify(i is True, "some subcase fail") diff --git a/tests/TestSuite_ice_iavf_fdir_pppol2tpoudp.py b/tests/TestSuite_ice_iavf_fdir_pppol2tpoudp.py index 7c1fce62..51c9bcdf 100644 --- a/tests/TestSuite_ice_iavf_fdir_pppol2tpoudp.py +++ b/tests/TestSuite_ice_iavf_fdir_pppol2tpoudp.py @@ -9,8 +9,8 @@ from multiprocessing import Manager, Process import framework.utils as utils import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.utils import GREEN, RED @@ -1905,7 +1905,7 @@ class TestICEIAVFFDIRPPPoL2TPv2oUDP(TestCase): for tv in vectors: try: port_id = tv["check_param"]["port_id"] - self.dut.send_expect("flow flush %d" % port_id, "testpmd> ", 120) + self.sut_node.send_expect("flow flush %d" % port_id, "testpmd> ", 120) # validate rule self.validate_fdir_rule(tv["rule"], check_stats=True) @@ -1959,34 +1959,34 @@ class TestICEIAVFFDIRPPPoL2TPv2oUDP(TestCase): prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.pci0 = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.pci0 = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: self.vf_driver = "vfio-pci" - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port_0, 1, driver=self.kdriver + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port_0, 1, driver=self.kdriver ) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] - self.dut.send_expect( + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + self.sut_node.send_expect( "ip link set %s vf 0 mac 00:11:22:33:44:55" % self.pf0_intf, "#" ) self.vf0_pci = self.sriov_vfs_port[0].pci for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + 
self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) self.launch_testpmd() self.rxq = 16 @@ -2010,9 +2010,9 @@ class TestICEIAVFFDIRPPPoL2TPv2oUDP(TestCase): self.verify(res is True, "there have port link is down") def send_packets(self, packets, pf_id=0, count=1): - self.pkt.update_pkt(packets) - tx_port = self.tester_iface0 - self.pkt.send_pkt(crb=self.tester, tx_port=tx_port, count=count) + self.scapy_pkt_builder.update_pkt(packets) + tx_port = self.tg_iface0 + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_port, count=count) def send_pkts_getouput(self, pkts, pf_id=0, count=1): """ @@ -2021,7 +2021,7 @@ class TestICEIAVFFDIRPPPoL2TPv2oUDP(TestCase): """ self.send_packets(pkts, pf_id, count) time.sleep(1) - out_info = self.dut.get_session_output(timeout=1) + out_info = self.sut_node.get_session_output(timeout=1) out_pkt = self.pmd_output.execute_cmd("stop") out = out_info + out_pkt self.pmd_output.execute_cmd("start") @@ -2129,9 +2129,9 @@ class TestICEIAVFFDIRPPPoL2TPv2oUDP(TestCase): self.verify(not p.search(out), "flow rule on port %s is existed" % port_id) def destroy_vf(self): - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) time.sleep(2) - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[0]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[0]) def test_mac_ipv4_l2tpv2_control(self): self.rte_flow_process(vectors_mac_ipv4_l2tpv2_control) @@ -2189,10 +2189,10 @@ class TestICEIAVFFDIRPPPoL2TPv2oUDP(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("clear port stats all", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("clear port stats all", timeout=1) self.pmd_output.execute_cmd("stop") def tear_down_all(self): self.destroy_vf() - self.dut.kill_all() + self.sut_node.kill_all() diff --git 
a/tests/TestSuite_ice_iavf_ip_fragment_rte_flow.py b/tests/TestSuite_ice_iavf_ip_fragment_rte_flow.py index 55a6618b..05b555c7 100644 --- a/tests/TestSuite_ice_iavf_ip_fragment_rte_flow.py +++ b/tests/TestSuite_ice_iavf_ip_fragment_rte_flow.py @@ -8,8 +8,8 @@ import time from scapy.all import * import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.utils import GREEN, RED @@ -317,31 +317,31 @@ tv_mac_ipv6_fragment_rss = { class TestICEIavfIpFragmentRteFlow(TestCase): def set_up_all(self): - self.ports = self.dut.get_ports(self.nic) + self.ports = self.sut_node.get_ports(self.nic) # init pkt - self.pkt = Packet() + self.scapy_pkt_builder = ScapyPacketBuilder() # set default app parameter - self.pmd_out = PmdOutput(self.dut) - self.tester_mac = self.tester.get_mac(0) - self.tester_port0 = self.tester.get_local_port(self.ports[0]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) + self.pmd_out = PmdOutput(self.sut_node) + self.tg_mac = self.tg_node.get_mac(0) + self.tg_port0 = self.tg_node.get_local_port(self.ports[0]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) - self.tester.send_expect("ifconfig {} up".format(self.tester_iface0), "# ") + self.tg_node.send_expect("ifconfig {} up".format(self.tg_iface0), "# ") self.param = "--rxq={} --txq={} --disable-rss --txd=384 --rxd=384".format( LAUNCH_QUEUE, LAUNCH_QUEUE ) self.param_fdir = "--rxq={} --txq={}".format(LAUNCH_QUEUE, LAUNCH_QUEUE) - self.cores = self.dut.get_core_list("1S/4C/1T") + self.cores = self.sut_node.get_core_list("1S/4C/1T") self.setup_1pf_vfs_env() - self.ports_pci = [self.dut.ports_info[self.ports[0]]["pci"]] + self.ports_pci = [self.sut_node.ports_info[self.ports[0]]["pci"]] self.rssprocess = rfc.RssProcessing( - self, self.pmd_out, [self.tester_iface0], LAUNCH_QUEUE, ipfrag_flag=True + 
self, self.pmd_out, [self.tg_iface0], LAUNCH_QUEUE, ipfrag_flag=True ) self.fdirprocess = rfc.FdirProcessing( - self, self.pmd_out, [self.tester_iface0], LAUNCH_QUEUE, ipfrag_flag=True + self, self.pmd_out, [self.tg_iface0], LAUNCH_QUEUE, ipfrag_flag=True ) def set_up(self): @@ -351,14 +351,14 @@ class TestICEIavfIpFragmentRteFlow(TestCase): """ create vf and set vf mac """ - self.dut.bind_interfaces_linux("ice") - self.pf_interface = self.dut.ports_info[0]["intf"] - self.dut.send_expect("ifconfig {} up".format(self.pf_interface), "# ") - self.dut.generate_sriov_vfs_by_port(self.ports[0], 1, driver=self.kdriver) - self.dut.send_expect( + self.sut_node.bind_interfaces_linux("ice") + self.pf_interface = self.sut_node.ports_info[0]["intf"] + self.sut_node.send_expect("ifconfig {} up".format(self.pf_interface), "# ") + self.sut_node.generate_sriov_vfs_by_port(self.ports[0], 1, driver=self.kdriver) + self.sut_node.send_expect( "ip link set {} vf 0 mac 00:11:22:33:55:66".format(self.pf_interface), "# " ) - self.vf_port = self.dut.ports_info[0]["vfs_port"] + self.vf_port = self.sut_node.ports_info[0]["vfs_port"] self.verify(len(self.vf_port) != 0, "VF create failed") self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: @@ -381,9 +381,9 @@ class TestICEIavfIpFragmentRteFlow(TestCase): self.pmd_out.start_testpmd( cores=self.cores, ports=self.vf_ports_pci, param=self.param ) - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") def destroy_testpmd_and_vf(self): """ @@ -391,15 +391,15 @@ class TestICEIavfIpFragmentRteFlow(TestCase): if vf testpmd, destroy the vfs and set vf_flag = false """ for port_id in self.ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + 
self.sut_node.destroy_sriov_vfs_by_port(port_id) def tear_down(self): - self.dut.send_expect("quit", "# ") - self.dut.kill_all() + self.sut_node.send_expect("quit", "# ") + self.sut_node.kill_all() def tear_down_all(self): self.destroy_testpmd_and_vf() - self.dut.kill_all() + self.sut_node.kill_all() def test_iavf_mac_ipv4_frag_fdir(self): self.launch_testpmd(param_fdir=True) @@ -450,7 +450,7 @@ class TestICEIavfIpFragmentRteFlow(TestCase): self.logger.error("Error: queue index {} != '2'".format(queue[0][0])) continue result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.logger.info("Subcase 2: exclusive validation fdir rule") @@ -469,7 +469,7 @@ class TestICEIavfIpFragmentRteFlow(TestCase): self.logger.error("Error: queue index {} != '2'".format(queue[0][0])) continue result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.logger.info("Subcase 3: exclusive validation rss rule") @@ -497,7 +497,7 @@ class TestICEIavfIpFragmentRteFlow(TestCase): result = False self.logger.error("hash value is incorrect") result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.logger.info("Subcase 4: exclusive validation rss rule") @@ -516,7 +516,7 @@ class TestICEIavfIpFragmentRteFlow(TestCase): result = False self.logger.error("hash value is incorrect") result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.verify(all(result_list) is True, "sub-case failed {}".format(result_list)) diff --git a/tests/TestSuite_ice_iavf_rss_configure.py b/tests/TestSuite_ice_iavf_rss_configure.py index 8194c81d..e8230f5c 
100644 --- a/tests/TestSuite_ice_iavf_rss_configure.py +++ b/tests/TestSuite_ice_iavf_rss_configure.py @@ -9,7 +9,7 @@ import time from scapy.contrib.gtp import * -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder from framework.pmd_output import PmdOutput from framework.test_case import TestCase @@ -234,31 +234,31 @@ class IAVFRSSConfigureTest(TestCase): Run at the start of each test suite. Generic filter Prerequistites """ - self.dut_ports = self.dut.get_ports(self.nic) - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) self.cores = "1S/5C/1T" # check core num - core_list = self.dut.get_core_list(self.cores) + core_list = self.sut_node.get_core_list(self.cores) self.verify(len(core_list) >= 5, "Insufficient cores for testing") self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: self.vf_driver = "vfio-pci" - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] self.create_vf() self.queue_num = 16 self.param = " --rxq={} --txq={} ".format(self.queue_num, self.queue_num) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.launch_testpmd(param=self.param) self.rssprocess = RssProcessing( - self, self.pmdout, [self.tester_iface0], self.queue_num + self, self.pmdout, [self.tg_iface0], self.queue_num ) - self.dut_session = self.dut.new_session() 
+ self.sut_session = self.sut_node.new_session() def set_up(self): """ @@ -266,7 +266,7 @@ class IAVFRSSConfigureTest(TestCase): """ # check testpmd process status cmd = "ps -aux | grep testpmd | grep -v grep" - out = self.dut_session.send_expect(cmd, "#", 15) + out = self.sut_session.send_expect(cmd, "#", 15) if "testpmd" not in out: self.restart_testpmd() @@ -280,9 +280,9 @@ class IAVFRSSConfigureTest(TestCase): """ Run after each test suite. """ - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") self.destroy_vf() - self.dut.kill_all() + self.sut_node.kill_all() def launch_testpmd(self, param=""): """ @@ -311,11 +311,11 @@ class IAVFRSSConfigureTest(TestCase): self.pmdout.execute_cmd("start") def create_vf(self): - self.dut.bind_interfaces_linux("ice") - self.dut.generate_sriov_vfs_by_port(self.dut_ports[0], 1) - self.sriov_vfs_port = self.dut.ports_info[self.dut_ports[0]]["vfs_port"] - self.dut.send_expect("ifconfig %s up" % self.pf0_intf, "# ") - self.dut.send_expect( + self.sut_node.bind_interfaces_linux("ice") + self.sut_node.generate_sriov_vfs_by_port(self.sut_ports[0], 1) + self.sriov_vfs_port = self.sut_node.ports_info[self.sut_ports[0]]["vfs_port"] + self.sut_node.send_expect("ifconfig %s up" % self.pf0_intf, "# ") + self.sut_node.send_expect( "ip link set %s vf 0 mac 00:11:22:33:44:55" % self.pf0_intf, "#" ) self.vf0_pci = self.sriov_vfs_port[0].pci @@ -327,9 +327,9 @@ class IAVFRSSConfigureTest(TestCase): raise Exception(e) def destroy_vf(self): - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) time.sleep(2) - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[0]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[0]) def set_rss_configure(self, rss_type): if rss_type != "": diff --git a/tests/TestSuite_ice_ip_fragment_rte_flow.py b/tests/TestSuite_ice_ip_fragment_rte_flow.py index 174d1e3c..89ddd0ea 100644 --- a/tests/TestSuite_ice_ip_fragment_rte_flow.py +++ 
b/tests/TestSuite_ice_ip_fragment_rte_flow.py @@ -8,8 +8,8 @@ import time from scapy.all import * import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.utils import GREEN, RED @@ -323,34 +323,34 @@ tv_mac_ipv6_fragment_rss = { class TestICEIpFragmentRteFlow(TestCase): def set_up_all(self): - self.ports = self.dut.get_ports(self.nic) + self.ports = self.sut_node.get_ports(self.nic) # init pkt - self.pkt = Packet() + self.scapy_pkt_builder = ScapyPacketBuilder() # set default app parameter - self.pmd_out = PmdOutput(self.dut) - self.tester_mac = self.tester.get_mac(0) - self.tester_port0 = self.tester.get_local_port(self.ports[0]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) + self.pmd_out = PmdOutput(self.sut_node) + self.tg_mac = self.tg_node.get_mac(0) + self.tg_port0 = self.tg_node.get_local_port(self.ports[0]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) - self.tester.send_expect("ifconfig {} up".format(self.tester_iface0), "# ") + self.tg_node.send_expect("ifconfig {} up".format(self.tg_iface0), "# ") self.param = "--rxq={} --txq={} --disable-rss --txd=384 --rxd=384".format( LAUNCH_QUEUE, LAUNCH_QUEUE ) self.param_fdir = "--rxq={} --txq={}".format(LAUNCH_QUEUE, LAUNCH_QUEUE) - self.cores = self.dut.get_core_list("1S/4C/1T") + self.cores = self.sut_node.get_core_list("1S/4C/1T") - self.ports_pci = [self.dut.ports_info[self.ports[0]]["pci"]] + self.ports_pci = [self.sut_node.ports_info[self.ports[0]]["pci"]] self.rssprocess = rfc.RssProcessing( - self, self.pmd_out, [self.tester_iface0], LAUNCH_QUEUE, ipfrag_flag=True + self, self.pmd_out, [self.tg_iface0], LAUNCH_QUEUE, ipfrag_flag=True ) self.fdirprocess = rfc.FdirProcessing( - self, self.pmd_out, [self.tester_iface0], LAUNCH_QUEUE, ipfrag_flag=True + self, self.pmd_out, [self.tg_iface0], 
LAUNCH_QUEUE, ipfrag_flag=True ) def set_up(self): - self.dut.bind_interfaces_linux("vfio-pci") + self.sut_node.bind_interfaces_linux("vfio-pci") def launch_testpmd(self, param_fdir=False): """ @@ -366,16 +366,16 @@ class TestICEIpFragmentRteFlow(TestCase): self.pmd_out.start_testpmd( cores=self.cores, ports=self.ports_pci, param=self.param ) - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") def tear_down(self): - self.dut.send_expect("quit", "# ") - self.dut.kill_all() + self.sut_node.send_expect("quit", "# ") + self.sut_node.kill_all() def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() def test_mac_ipv4_frag_fdir(self): self.launch_testpmd(param_fdir=True) @@ -436,7 +436,7 @@ class TestICEIpFragmentRteFlow(TestCase): self.logger.error("Error: queue index {} != '2'".format(queue[0][0])) continue result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.logger.info("Subcase 2: exclusive validation fdir rule") @@ -455,7 +455,7 @@ class TestICEIpFragmentRteFlow(TestCase): self.logger.error("Error: queue index {} != '2'".format(queue[0][0])) continue result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.logger.info("Subcase 3: exclusive validation rss rule") @@ -483,7 +483,7 @@ class TestICEIpFragmentRteFlow(TestCase): result = False self.logger.error("hash value is incorrect") result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % 
result_list) self.logger.info("Subcase 4: exclusive validation rss rule") @@ -502,7 +502,7 @@ class TestICEIpFragmentRteFlow(TestCase): result = False self.logger.error("hash value is incorrect") result_list.append(result) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.logger.info("*********subcase test result %s" % result_list) self.verify(all(result_list) is True, "sub-case failed {}".format(result_list)) diff --git a/tests/TestSuite_ice_limit_value_test.py b/tests/TestSuite_ice_limit_value_test.py index 6954e885..80f6f824 100644 --- a/tests/TestSuite_ice_limit_value_test.py +++ b/tests/TestSuite_ice_limit_value_test.py @@ -9,8 +9,8 @@ import time import framework.utils as utils import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from .rte_flow_common import TXQ_RXQ_NUMBER @@ -47,35 +47,35 @@ class TestICELimitValue(TestCase): prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.portMask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) - localPort0 = self.tester.get_local_port(self.dut_ports[0]) - localPort1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(localPort0) - self.tester_iface1 = self.tester.get_interface(localPort1) - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf1_intf = self.dut.ports_info[self.dut_ports[1]]["intf"] - self.pf0_mac = self.dut.get_mac_address(0) - self.pf1_mac = self.dut.get_mac_address(1) - self.pci0 = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pci1 = self.dut.ports_info[self.dut_ports[1]]["pci"] - - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) - self.path = self.dut.apps_name["test-pmd"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.portMask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) + localPort0 = self.tg_node.get_local_port(self.sut_ports[0]) + localPort1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(localPort0) + self.tg_iface1 = self.tg_node.get_interface(localPort1) + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf1_intf = self.sut_node.ports_info[self.sut_ports[1]]["intf"] + self.pf0_mac = self.sut_node.get_mac_address(0) + self.pf1_mac = self.sut_node.get_mac_address(1) + self.pci0 = 
self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pci1 = self.sut_node.ports_info[self.sut_ports[1]]["pci"] + + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) + self.path = self.sut_node.apps_name["test-pmd"] self.src_file_dir = "dep/" self.q_num = TXQ_RXQ_NUMBER # max_rule number - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.__tx_iface = self.tester.get_interface(localPort) - self.pkt = Packet() + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.__tx_iface = self.tg_node.get_interface(localPort) + self.scapy_pkt_builder = ScapyPacketBuilder() self.is_chapman = self.is_chapman_beach() def set_up_for_iavf_dir(self): @@ -83,29 +83,29 @@ class TestICELimitValue(TestCase): Run before each test case. """ # bind pf to kernel - for port in self.dut_ports: - netdev = self.dut.ports_info[port]["port"] + for port in self.sut_ports: + netdev = self.sut_node.ports_info[port]["port"] netdev.bind_driver(driver="ice") # set vf driver self.vf_driver = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.suite_config = rfc.get_suite_config(self) self.setup_2pf_4vf_env() def setup_2pf_4vf_env(self, driver="default"): # get PF interface name - self.used_dut_port_0 = self.dut_ports[0] - self.used_dut_port_1 = self.dut_ports[1] + self.used_sut_port_0 = self.sut_ports[0] + self.used_sut_port_1 = self.sut_ports[1] # generate 2 VFs on PF - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 2, driver=driver) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 2, driver=driver) - self.sriov_vfs_pf0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] - self.sriov_vfs_pf1 = self.dut.ports_info[self.used_dut_port_1]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 2, driver=driver) + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_1, 2, driver=driver) + self.sriov_vfs_pf0 = 
self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + self.sriov_vfs_pf1 = self.sut_node.ports_info[self.used_sut_port_1]["vfs_port"] self.mac_list = [f"00:11:22:33:44:{mac}" for mac in [55, 66, 77, 88]] for i in range(0, 4): - self.dut.send_expect( + self.sut_node.send_expect( "ip link set {} vf {} mac {}".format( eval(f"self.pf{i//2}_intf"), i % 2, self.mac_list[i] ), @@ -122,20 +122,20 @@ class TestICELimitValue(TestCase): except Exception as e: self.destroy_env() raise Exception(e) - out = self.dut.send_expect("./usertools/dpdk-devbind.py -s", "#") + out = self.sut_node.send_expect("./usertools/dpdk-devbind.py -s", "#") print(out) def setup_1pf_vfs_env(self, pf_port=0, driver="default"): - self.used_dut_port_0 = self.dut_ports[pf_port] + self.used_sut_port_0 = self.sut_ports[pf_port] # get PF interface name - self.pf0_intf = self.dut.ports_info[self.used_dut_port_0]["intf"] - out = self.dut.send_expect("ethtool -i %s" % self.pf0_intf, "#") + self.pf0_intf = self.sut_node.ports_info[self.used_sut_port_0]["intf"] + out = self.sut_node.send_expect("ethtool -i %s" % self.pf0_intf, "#") # generate 4 VFs on PF - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 4, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 4, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] # set VF0 as trust - self.dut.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") + self.sut_node.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#") # bind VFs to dpdk driver for port in self.sriov_vfs_port_0: port.bind_driver(self.vf_driver) @@ -145,7 +145,7 @@ class TestICELimitValue(TestCase): # modprobe vfio driver if driver == "vfio-pci": for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != 
"vfio-pci": netdev.bind_driver(driver="vfio-pci") @@ -153,13 +153,13 @@ class TestICELimitValue(TestCase): elif driver == "igb_uio": # igb_uio should insmod as default, no need to check for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "igb_uio": netdev.bind_driver(driver="igb_uio") else: for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver_now = netdev.get_nic_driver() if driver == "": driver = netdev.default_driver @@ -170,10 +170,10 @@ class TestICELimitValue(TestCase): """ This is to stop testpmd and destroy 1pf and 2vfs environment. """ - self.dut.send_expect("quit", "# ", 60) + self.sut_node.send_expect("quit", "# ", 60) time.sleep(2) - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[0]) - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[1]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[0]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[1]) def config_testpmd(self): self.pmd_output.execute_cmd("set fwd rxonly") @@ -209,9 +209,9 @@ class TestICELimitValue(TestCase): self.config_testpmd() def send_packets(self, packets, pf_id=0): - self.pkt.update_pkt(packets) - tx_port = self.tester_iface0 if pf_id == 0 else self.tester_iface1 - self.pkt.send_pkt(crb=self.tester, tx_port=tx_port) + self.scapy_pkt_builder.update_pkt(packets) + tx_port = self.tg_iface0 if pf_id == 0 else self.tg_iface1 + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_port) def send_pkts_getouput(self, pkts, pf_id=0): """ @@ -220,7 +220,7 @@ class TestICELimitValue(TestCase): """ self.send_packets(pkts, pf_id) time.sleep(1) - out_info = self.dut.get_session_output(timeout=1) + out_info = self.sut_node.get_session_output(timeout=1) out_pkt = self.pmd_output.execute_cmd("stop") out = out_info + out_pkt self.pmd_output.execute_cmd("start") @@ -282,7 +282,7 @@ class 
TestICELimitValue(TestCase): check the rules in list identical to ones in rule_list """ if session_name == "": - session_name = self.dut + session_name = self.sut_node out = session_name.send_expect("flow list %d" % port_id, "testpmd> ", 15) p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule") m = p.search(out) @@ -303,7 +303,7 @@ class TestICELimitValue(TestCase): return result def check_rule_number(self, port_id=0, num=0): - out = self.dut.send_command("flow list %s" % port_id, timeout=30) + out = self.sut_node.send_command("flow list %s" % port_id, timeout=30) result_scanner = r"\d*.*?\d*.*?\d*.*?=>*" scanner = re.compile(result_scanner, re.DOTALL) li = scanner.findall(out) @@ -315,7 +315,7 @@ class TestICELimitValue(TestCase): return out def get_rule_number(self, port_id=0): - out = self.dut.send_command("flow list %s" % port_id, timeout=300) + out = self.sut_node.send_command("flow list %s" % port_id, timeout=300) result_scanner = r"\d*.*?\d*.*?\d*.*?=>*" scanner = re.compile(result_scanner, re.DOTALL) li = scanner.findall(out) @@ -326,14 +326,14 @@ class TestICELimitValue(TestCase): general packets processing workflow. 
""" if session_name == "": - session_name = self.dut + session_name = self.sut_node if tx_iface == "": tx_iface = self.__tx_iface session_name.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - self.pkt.update_pkt(dic["scapy_str"]) - self.pkt.send_pkt(self.tester, tx_port=tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(dic["scapy_str"]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tx_iface, count=1, timeout=370) time.sleep(3) out = session_name.send_expect("stop", "testpmd> ", 15) dic["check_func"]["func"]( @@ -341,8 +341,8 @@ class TestICELimitValue(TestCase): ) def get_nic_product_name(self, port_id=0): - pf_pci = self.dut.ports_info[port_id]["pci"] - out = self.dut.send_expect( + pf_pci = self.sut_node.ports_info[port_id]["pci"] + out = self.sut_node.send_expect( 'lspci -s {} -vvv |grep "Product Name"'.format(pf_pci), "#" ) res = re.search(r"Network Adapter\s+(?PE810-.*)", out) @@ -363,9 +363,9 @@ class TestICELimitValue(TestCase): E810-CQDA2 Intel® Ethernet 800 Series 100g*2 14336 E810-2CQDA2 chapmanbeach100g*2 14336 (1 vf) """ - dut_file_dir = "/tmp/" + sut_file_dir = "/tmp/" self.set_up_for_iavf_dir() - self.dut.kill_all() + self.sut_node.kill_all() src_file = "create_14336_rules" flows = open(self.src_file_dir + src_file, mode="w") count = 0 @@ -378,7 +378,7 @@ class TestICELimitValue(TestCase): count = count + 1 flows.close() self.verify(count == 14336, "failed to create 14336 fdir rules on vf.") - self.dut.session.copy_file_to(self.src_file_dir + src_file, dut_file_dir) + self.sut_node.session.copy_file_to(self.src_file_dir + src_file, sut_file_dir) eal_param = "-c f -n 6 -a %s -a %s" % ( self.sriov_vfs_pf0[0].pci, @@ -388,9 +388,9 @@ class TestICELimitValue(TestCase): self.path + eal_param + " -- -i --rxq=%s --txq=%s" % (self.q_num, self.q_num) - + " --cmdline-file=%s" % (dut_file_dir + src_file) + + " --cmdline-file=%s" % (sut_file_dir + src_file) ) - self.dut.send_expect(command, "testpmd> ", 300) + 
self.sut_node.send_expect(command, "testpmd> ", 300) self.config_testpmd() # can't create more than 14336 rules on vf0 @@ -432,7 +432,7 @@ class TestICELimitValue(TestCase): ) # delete one rule of vf0 - self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ", timeout=200) + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ", timeout=200) self.create_fdir_rule(rule_0_vf1, check_stats=True) pkt_0_vf1 = 'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.20",dst="192.168.56.0")/Raw("x" * 80)' out_0_vf1 = self.send_pkts_getouput(pkts=pkt_0_vf1, pf_id=0) @@ -444,7 +444,7 @@ class TestICELimitValue(TestCase): ) # flush all the rules - self.dut.send_expect("flow flush 0", "testpmd> ", timeout=500) + self.sut_node.send_expect("flow flush 0", "testpmd> ", timeout=500) self.check_fdir_rule(port_id=0, stats=False) out_0 = self.send_pkts_getouput(pkts=pkt_0, pf_id=0) out_14335 = self.send_pkts_getouput(pkts=pkt_14335, pf_id=0) @@ -479,10 +479,10 @@ class TestICELimitValue(TestCase): E810-2CQDA2 chapmanbeach100g*2 14336*2 (if vfs generated by 2 pf port, each can create 14336 rules at most) """ - dut_file_dir = "/tmp/" - self.dut.kill_all() + sut_file_dir = "/tmp/" + self.sut_node.kill_all() self.set_up_for_iavf_dir() - self.session_secondary = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() # create one rule on vf0 and 14335 rules on vf1, if card is chapman beach100g*2,needs to create one rule on # vf2 and 14335 rules on vf3 in addition max_rules = 14336 @@ -506,9 +506,9 @@ class TestICELimitValue(TestCase): if i != 0 or j != 0: k = 1 file_handle.write(rule.format(k, i, j)) - self.dut.session.copy_file_to(src_file, dut_file_dir) + self.sut_node.session.copy_file_to(src_file, sut_file_dir) param = "--rxq={} --txq={} --cmdline-file={}".format( - self.q_num, self.q_num, os.path.join(dut_file_dir, file_name) + self.q_num, self.q_num, os.path.join(sut_file_dir, file_name) ) self.pmd_output.start_testpmd(param=param, ports=ports, 
timeout=1200) self.config_testpmd() @@ -560,7 +560,7 @@ class TestICELimitValue(TestCase): ) # destroy one rule of vf0 and create a new rule on vf1 - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.create_fdir_rule(rule_14336_vf.format(1), check_stats=True) # send matched packet for new rule of vf1 @@ -608,7 +608,7 @@ class TestICELimitValue(TestCase): ) # destroy one rule of vf2 and create a new rule on vf3 - self.dut.send_expect("flow flush 2", "testpmd> ") + self.sut_node.send_expect("flow flush 2", "testpmd> ") self.create_fdir_rule(rule_14336_vf.format(3), check_stats=True) # send matched packet for new rule of vf3 @@ -622,7 +622,7 @@ class TestICELimitValue(TestCase): # flush all the rules and check the rule list,no rule listed for i in range(len(ports)): - self.dut.send_expect(f"flow flush {i}", "testpmd> ", timeout=500) + self.sut_node.send_expect(f"flow flush {i}", "testpmd> ", timeout=500) self.check_fdir_rule(port_id=i, stats=False) # verify matched packet received without FDIR matched ID @@ -640,7 +640,7 @@ class TestICELimitValue(TestCase): check_param={"port_id": k, "mark_id": 0, "queue": 5}, stats=False, ) - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) def test_maxnum_rules_1pf_2vf(self): """ @@ -650,10 +650,10 @@ class TestICELimitValue(TestCase): if hardware is chapman beach 100g*2, 1 pf can create 2048 rules,vfs generated by the same pf share 14336 rules, so this card can create (2048 + 14336)*2=32768 rules """ - dut_file_dir = "/tmp/" - self.dut.kill_all() + sut_file_dir = "/tmp/" + self.sut_node.kill_all() self.set_up_for_iavf_dir() - self.session_secondary = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() # create kernel rules on pf1 rule = "ethtool -N {} flow-type tcp4 src-ip 192.168.{}.{} dst-ip 192.168.100.2 src-port 32 dst-port 33 action 8 \n" if self.nic in ["ICE_100G-E810C_QSFP"]: @@ -664,13 
+664,13 @@ class TestICELimitValue(TestCase): num = 2 for i in range(num): for j in range(256): - self.dut.send_expect(rule.format(self.pf0_intf, i, j), "#") + self.sut_node.send_expect(rule.format(self.pf0_intf, i, j), "#") if self.is_chapman: - self.dut.send_expect(rule.format(self.pf1_intf, i, j), "#") + self.sut_node.send_expect(rule.format(self.pf1_intf, i, j), "#") - self.dut.send_expect(rule.format(self.pf0_intf, "100", "0"), "#") + self.sut_node.send_expect(rule.format(self.pf0_intf, "100", "0"), "#") if self.is_chapman: - self.dut.send_expect(rule.format(self.pf1_intf, "100", "0"), "#") + self.sut_node.send_expect(rule.format(self.pf1_intf, "100", "0"), "#") # create 1 rule on vf0, and 14334 rules on vf1, if card is chapman beach100g*2,needs to create 1 rule on # vf2 and 14334 rules on vf3 in addition @@ -697,9 +697,9 @@ class TestICELimitValue(TestCase): if i != 0 or j != 0: k = 1 file_handle.write(rule.format(k, i, j)) - self.dut.session.copy_file_to(src_file, dut_file_dir) + self.sut_node.session.copy_file_to(src_file, sut_file_dir) param = "--rxq={} --txq={} --cmdline-file={}".format( - self.q_num, self.q_num, os.path.join(dut_file_dir, file_name) + self.q_num, self.q_num, os.path.join(sut_file_dir, file_name) ) self.pmd_output.start_testpmd(param=param, ports=ports, timeout=1200) self.config_testpmd() @@ -802,7 +802,7 @@ class TestICELimitValue(TestCase): # flush all the rules and check the rule list,no rule listed for i in range(len(ports)): - self.dut.send_expect(f"flow flush {i}", "testpmd> ", timeout=500) + self.sut_node.send_expect(f"flow flush {i}", "testpmd> ", timeout=500) self.check_fdir_rule(port_id=i, stats=False) # verify matched packet received without FDIR matched ID @@ -820,7 +820,7 @@ class TestICELimitValue(TestCase): check_param={"port_id": k, "mark_id": 0, "queue": 5}, stats=False, ) - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) def test_maxnum_rules_1pf_0_rules_vf(self): """ 
@@ -831,10 +831,10 @@ class TestICELimitValue(TestCase): if hardware is chapman beach 100g*2, 1 pf can create 2048 rules,vfs generated by the same pf share 14336 rules, so if create 14386 rules on pf1,check failed to create rule on vf00 and vf10(vf00 and vf10 generated by pf1) """ - dut_file_dir = "/tmp/" - self.dut.kill_all() + sut_file_dir = "/tmp/" + self.sut_node.kill_all() self.set_up_for_iavf_dir() - self.session_secondary = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() # create maxinum rules on pf1 src_file = "create_15360_kernel_rules" flows = open(self.src_file_dir + src_file, mode="w") @@ -865,15 +865,15 @@ class TestICELimitValue(TestCase): count = count + 1 flows.close() self.verify(count == 14848, "failed to create 14848 fdir rules on pf.") - self.dut.session.copy_file_to(self.src_file_dir + src_file, dut_file_dir) + self.sut_node.session.copy_file_to(self.src_file_dir + src_file, sut_file_dir) # create maxinum rules on pf1 fkr = open(self.src_file_dir + src_file, "r+") kernel_rules = fkr.read() fkr.close() - self.dut.send_expect(kernel_rules, "# ") + self.sut_node.send_expect(kernel_rules, "# ") time.sleep(200) # failed to create 1 more rule on pf1 - self.dut.send_expect( + self.sut_node.send_expect( "ethtool -N %s flow-type tcp4 src-ip 192.168.100.0 dst-ip 192.168.100.2 src-port 32 dst-port 33 action 8" % self.pf1_intf, "Cannot insert RX class rule: No space left on device", @@ -893,7 +893,7 @@ class TestICELimitValue(TestCase): + eal_param + " -- -i --rxq=%s --txq=%s" % (self.q_num, self.q_num) ) - self.dut.send_expect(command, "testpmd> ", 20) + self.sut_node.send_expect(command, "testpmd> ", 20) self.config_testpmd() rule_0_vf00 = "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.56.0 / end actions queue index 5 / mark / end" @@ -948,14 +948,14 @@ class TestICELimitValue(TestCase): stats=True, ) - self.dut.send_expect("quit", "# ") - self.dut.close_session(self.session_secondary) + 
self.sut_node.send_expect("quit", "# ") + self.sut_node.close_session(self.session_secondary) def test_stress_add_delete_rules_1vf(self): """ add/delete rules 14336 times on 1 vf """ - dut_file_dir = "/tmp/" + sut_file_dir = "/tmp/" rules = [ "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / udp src is 22 dst is 23 / end actions queue index 6 / mark / end", "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions rss queues 2 3 end / mark id 1 / end", @@ -964,7 +964,7 @@ class TestICELimitValue(TestCase): 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/UDP(sport=22,dport=23)/Raw("x" * 80)', 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)', ] - self.dut.kill_all() + self.sut_node.kill_all() self.set_up_for_iavf_dir() src_file = "add_delete_rules_1vf" flows = open(self.src_file_dir + src_file, mode="w") @@ -978,7 +978,7 @@ class TestICELimitValue(TestCase): self.verify( count == 14336, "failed to add/delete 14336 times of fdir rules on vf." 
) - self.dut.session.copy_file_to(self.src_file_dir + src_file, dut_file_dir) + self.sut_node.session.copy_file_to(self.src_file_dir + src_file, sut_file_dir) eal_param = "-c f -n 6 -a %s -a %s" % ( self.sriov_vfs_pf0[0].pci, @@ -988,9 +988,9 @@ class TestICELimitValue(TestCase): self.path + eal_param + " -- -i --rxq=%s --txq=%s" % (self.q_num, self.q_num) - + " --cmdline-file=%s" % (dut_file_dir + src_file) + + " --cmdline-file=%s" % (sut_file_dir + src_file) ) - self.dut.send_expect(command, "testpmd> ", 900) + self.sut_node.send_expect(command, "testpmd> ", 900) self.config_testpmd() self.check_fdir_rule(port_id=0, stats=False) self.create_fdir_rule(rules, check_stats=True) @@ -1013,7 +1013,7 @@ class TestICELimitValue(TestCase): """ add/delete rules 14336 times on 2 vfs """ - dut_file_dir = "/tmp/" + sut_file_dir = "/tmp/" rules = [ "flow create 0 ingress pattern eth / ipv4 src is 192.168.56.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 5 / end", "flow create 1 ingress pattern eth / ipv4 src is 192.168.56.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 5 / end", @@ -1022,7 +1022,7 @@ class TestICELimitValue(TestCase): 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.56.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)', 'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.56.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)', ] - self.dut.kill_all() + self.sut_node.kill_all() self.set_up_for_iavf_dir() src_file = "add_delete_rules_2vfs" flows = open(self.src_file_dir + src_file, mode="w") @@ -1037,7 +1037,7 @@ class TestICELimitValue(TestCase): self.verify( count == 14336, "failed to add/delete 14336 times of fdir rules on 2 vfs." 
) - self.dut.session.copy_file_to(self.src_file_dir + src_file, dut_file_dir) + self.sut_node.session.copy_file_to(self.src_file_dir + src_file, sut_file_dir) eal_param = "-c f -n 6 -a %s -a %s" % ( self.sriov_vfs_pf0[0].pci, @@ -1047,9 +1047,9 @@ class TestICELimitValue(TestCase): self.path + eal_param + " -- -i --rxq=%s --txq=%s" % (self.q_num, self.q_num) - + " --cmdline-file=%s" % (dut_file_dir + src_file) + + " --cmdline-file=%s" % (sut_file_dir + src_file) ) - self.dut.send_expect(command, "testpmd> ", 900) + self.sut_node.send_expect(command, "testpmd> ", 900) self.config_testpmd() self.check_fdir_rule(port_id=0, stats=False) self.check_fdir_rule(port_id=1, stats=False) @@ -1079,7 +1079,7 @@ class TestICELimitValue(TestCase): self.launch_testpmd_with_mark() self.pmd_output.execute_cmd("start") self.pmd_output.execute_cmd("stop") - self.dut.send_command("quit", timeout=2) + self.sut_node.send_command("quit", timeout=2) cmd_path = "/tmp/add_delete_rules" cmds = [ "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / udp src is 22 dst is 23 / end actions queue index 1 / mark / end", @@ -1090,17 +1090,17 @@ class TestICELimitValue(TestCase): cmds_li = map(lambda x: x + os.linesep, cmds) with open(cmd_path, "w") as f: f.writelines(cmds_li) - self.dut.session.copy_file_to(cmd_path, cmd_path) + self.sut_node.session.copy_file_to(cmd_path, cmd_path) try: - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[self.pci0, self.pci1], socket=self.ports_socket ) param = ( " --log-level='ice,7' -- -i --portmask=%s --rxq=%d --txq=%d --port-topology=loop --cmdline-file=%s" % (self.portMask, 64, 64, cmd_path) ) - command_line = self.dut.apps_name["test-pmd"] + eal_param + param - out = self.dut.send_expect(command_line, "testpmd>", timeout=1200) + command_line = self.sut_node.apps_name["test-pmd"] + eal_param + param + out = self.sut_node.send_expect(command_line, "testpmd>", 
timeout=1200) self.verify( "Failed to create file" not in out, "create some rule failed: %s" % out ) @@ -1139,16 +1139,16 @@ class TestICELimitValue(TestCase): except Exception as e: raise Exception(e) finally: - self.dut.kill_all() + self.sut_node.kill_all() # this case copy from ice_dcf_switch_filter def test_max_rule_number(self): # bind pf to kernel - self.bind_nics_driver(self.dut_ports, driver="ice") + self.bind_nics_driver(self.sut_ports, driver="ice") # set vf driver self.vf_driver = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") - self.path = self.dut.apps_name["test-pmd"] + self.sut_node.send_expect("modprobe vfio-pci", "#") + self.path = self.sut_node.apps_name["test-pmd"] # set up 4 vfs on 1 pf environment self.setup_1pf_vfs_env() @@ -1174,12 +1174,12 @@ class TestICELimitValue(TestCase): if rule_count > 32500: break flows.close() - dut_file_dir = "/tmp/" - self.dut.session.copy_file_to(src_file, dut_file_dir) + sut_file_dir = "/tmp/" + self.sut_node.session.copy_file_to(src_file, sut_file_dir) # launch testpmd with 32500 rules vf0_pci = self.sriov_vfs_port_0[0].pci vf1_pci = self.sriov_vfs_port_0[1].pci - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[vf0_pci, vf1_pci], port_options={vf0_pci: "cap=dcf"}, @@ -1189,10 +1189,10 @@ class TestICELimitValue(TestCase): + all_eal_param + " -- -i --cmdline-file=/tmp/testpmd_cmds_32k_switch_rules" ) - out = self.dut.send_expect(command, "testpmd> ", 360) + out = self.sut_node.send_expect(command, "testpmd> ", 360) self.testpmd_status = "running" - self.dut.send_expect("set portlist 1", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set portlist 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) # check the rule list with 32500 rules rule_list_num = list(range(0, 32500)) rule_list = [str(x) for x in rule_list_num] @@ -1213,7 +1213,7 
@@ class TestICELimitValue(TestCase): 'Ether(dst="68:05:ca:8d:ed:a8")/IP(src="192.168.%d.%d")/TCP(sport=25,dport=23)/Raw("X"*480)' % (i, j) ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( rule, "testpmd> ", timeout=2 ) # create a rule time.sleep(0.5) @@ -1241,7 +1241,7 @@ class TestICELimitValue(TestCase): mismatched_dic = tv_max_rule_number["mismatched"] self.send_and_check_packets(mismatched_dic) # destroy rules and send matched packets - self.dut.send_expect("flow flush 0", "testpmd> ", 300) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 300) self.check_switch_filter_rule_list(0, []) # send matched packets and check destroy_dict = copy.deepcopy(matched_dic) @@ -1250,13 +1250,13 @@ class TestICELimitValue(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_env() if getattr(self, "session_secondary", None): - self.dut.close_session(self.session_secondary) + self.sut_node.close_session(self.session_secondary) if getattr(self, "session_third", None): - self.dut.close_session(self.session_third) + self.sut_node.close_session(self.session_third) def tear_down_all(self): - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_env() diff --git a/tests/TestSuite_ice_qinq.py b/tests/TestSuite_ice_qinq.py index b13f5729..57f1ae57 100644 --- a/tests/TestSuite_ice_qinq.py +++ b/tests/TestSuite_ice_qinq.py @@ -6,8 +6,8 @@ import random import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg from framework.utils import GREEN, RED @@ -413,36 +413,36 @@ class TestICEQinq(TestCase): prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/4C/1T") + cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.tester_iface1 = self.tester.get_interface(self.tester_port1) - - self.used_dut_port = self.dut_ports[0] - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - port = self.dut.ports_info[0]["port"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.tg_iface1 = self.tg_node.get_interface(self.tg_port1) + + self.used_sut_port = self.sut_ports[0] + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + port = self.sut_node.ports_info[0]["port"] port.bind_driver() # get priv-flags default stats self.flag = "vf-vlan-pruning" - self.default_stats = self.dut.get_priv_flags_state(self.pf_interface, self.flag) + self.default_stats = self.sut_node.get_priv_flags_state(self.pf_interface, self.flag) self.vf_flag = False self.vf0_mac = "" self.vf1_mac = "00:11:22:33:44:11" self.vf2_mac = "00:11:22:33:44:22" self.vf3_mac = "00:11:22:33:44:33" - self.path = self.dut.apps_name["test-pmd"] - self.pkt = Packet() - self.pmd_output = PmdOutput(self.dut) + self.path = 
self.sut_node.apps_name["test-pmd"] + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) def reload_ice(self): - self.dut.send_expect("rmmod ice && modprobe ice", "# ") + self.sut_node.send_expect("rmmod ice && modprobe ice", "# ") def set_up(self): """ @@ -451,7 +451,7 @@ class TestICEQinq(TestCase): self.reload_ice() self.pci_list = [] if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.pf_interface, self.flag, self.default_stats), "# ", @@ -460,29 +460,29 @@ class TestICEQinq(TestCase): def setup_pf_vfs_env(self, vfs_num=4): if self.vf_flag is False: - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port, vfs_num, driver=self.kdriver + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port, vfs_num, driver=self.kdriver ) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] self.vf_flag = True if vfs_num > 1: - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 trust on" % (self.pf_interface), "# " ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 1 mac %s" % (self.pf_interface, self.vf1_mac), "# ", ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 2 mac %s" % (self.pf_interface, self.vf2_mac), "# ", ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 3 mac %s" % (self.pf_interface, self.vf3_mac), "# ", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf_interface, self.vf1_mac), "# ", ) @@ -493,18 +493,18 @@ class TestICEQinq(TestCase): self.pci_list.append(port.pci) self.vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} - self.dut.send_expect("ifconfig %s up" % self.pf_interface, "# ") - self.dut.send_expect( + self.sut_node.send_expect("ifconfig %s up" % self.pf_interface, "# ") + 
self.sut_node.send_expect( "ip link set dev %s vf 0 spoofchk off" % self.pf_interface, "# " ) if vfs_num == 4: - self.dut.send_expect( + self.sut_node.send_expect( "ip link set dev %s vf 1 spoofchk off" % self.pf_interface, "# " ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set dev %s vf 2 spoofchk off" % self.pf_interface, "# " ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set dev %s vf 3 spoofchk off" % self.pf_interface, "# " ) except Exception as e: @@ -513,7 +513,7 @@ class TestICEQinq(TestCase): def destroy_iavf(self): if self.vf_flag is True: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) self.vf_flag = False def launch_testpmd(self, vfs_num=4, dcf_param=False): @@ -614,14 +614,14 @@ class TestICEQinq(TestCase): rule_id = 0 if isinstance(rule_id, list): for i in rule_id: - out = self.dut.send_command( + out = self.sut_node.send_command( "flow destroy %s rule %s" % (port_id, i), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") m = p.search(out) self.verify(m, "flow rule %s delete failed" % rule_id) else: - out = self.dut.send_command( + out = self.sut_node.send_command( "flow destroy %s rule %s" % (port_id, rule_id), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") @@ -630,12 +630,12 @@ class TestICEQinq(TestCase): self.check_switch_rule(stats=False) def send_packets(self, pkts, tx_port=None, count=1): - self.pkt.update_pkt(pkts) - tx_port = self.tester_iface0 if not tx_port else tx_port - self.pkt.send_pkt(crb=self.tester, tx_port=tx_port, count=count) + self.scapy_pkt_builder.update_pkt(pkts) + tx_port = self.tg_iface0 if not tx_port else tx_port + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_port, count=count) def send_pkts_getouput(self, pkts, port_id=0, count=1): - tx_port = self.tester_iface0 if port_id == 0 else self.tester_iface1 + tx_port = self.tg_iface0 if port_id == 0 else self.tg_iface1 
self.send_packets(pkts, tx_port=tx_port, count=count) time.sleep(0.5) out = self.pmd_output.get_output() @@ -691,8 +691,8 @@ class TestICEQinq(TestCase): self.logger.info((GREEN("subcase passed: %s" % test["name"]))) except Exception as e: self.logger.warning((RED(e))) - self.dut.send_command("flow flush 0", timeout=1) - self.dut.send_command("flow flush 1", timeout=1) + self.sut_node.send_command("flow flush 0", timeout=1) + self.sut_node.send_command("flow flush 1", timeout=1) test_results[test["name"]] = False self.logger.info((RED("subcase failed: %s" % test["name"]))) continue @@ -703,8 +703,8 @@ class TestICEQinq(TestCase): self.verify(all(test_results.values()), "{} failed.".format(failed_cases)) def start_tcpdump(self, rxItf): - self.tester.send_expect("rm -rf getPackageByTcpdump.cap", "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf getPackageByTcpdump.cap", "#") + self.tg_node.send_expect( "tcpdump -A -nn -e -vv -w getPackageByTcpdump.cap -i %s 2> /dev/null& " % rxItf, "#", @@ -713,8 +713,8 @@ class TestICEQinq(TestCase): def get_tcpdump_package(self): time.sleep(1) - self.tester.send_expect("killall tcpdump", "#") - return self.tester.send_expect( + self.tg_node.send_expect("killall tcpdump", "#") + return self.tg_node.send_expect( "tcpdump -A -nn -e -vv -r getPackageByTcpdump.cap", "#" ) @@ -795,7 +795,7 @@ class TestICEQinq(TestCase): def send_packet_check_vlan_strip(self, pkts, outer=False, inner=False): for pkt in pkts: pkt_index = pkts.index(pkt) - self.start_tcpdump(self.tester_iface0) + self.start_tcpdump(self.tg_iface0) out = self.send_pkts_getouput(pkt) self.check_packets(out, 2, pkt_num=1) tcpdump_out = self.get_tcpdump_package() @@ -853,7 +853,7 @@ class TestICEQinq(TestCase): ): for pkt in pkts: pkt_index = pkts.index(pkt) - self.start_tcpdump(self.tester_iface0) + self.start_tcpdump(self.tg_iface0) out = self.send_pkts_getouput(pkt) self.check_packets(out, port_id) p = "vlan (\d+)" @@ -982,7 +982,7 @@ class 
TestICEQinq(TestCase): % self.vf1_mac, ] if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.pf_interface, self.flag), "# ", ) @@ -1027,7 +1027,7 @@ class TestICEQinq(TestCase): % self.vf1_mac, ] if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.pf_interface, self.flag), "# ", ) @@ -1040,13 +1040,13 @@ class TestICEQinq(TestCase): self.verify(len(receive_pkt) == 0, "Failed error received vlan packet!") self.pmd_output.execute_cmd("rx_vlan add 1 0") - self.start_tcpdump(self.tester_iface0) + self.start_tcpdump(self.tg_iface0) out = self.send_pkts_getouput(pkt_list1) tcpdump_out = self.get_tcpdump_package() receive_pkt = re.findall("dst=%s" % self.vf1_mac, out) self.verify(len(receive_pkt) == 2, "Failed error received vlan packet!") - tester_pkt = re.findall("vlan \d+", tcpdump_out) - self.verify(len(tester_pkt) == 6, "Failed pass received vlan packet!") + tg_pkt = re.findall("vlan \d+", tcpdump_out) + self.verify(len(tg_pkt) == 6, "Failed pass received vlan packet!") out = self.send_pkts_getouput(pkt_list2) receive_pkt = re.findall("dst=%s" % self.vf1_mac, out) @@ -1068,7 +1068,7 @@ class TestICEQinq(TestCase): % self.vf1_mac, ] if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.pf_interface, self.flag), "# ", ) @@ -1080,23 +1080,23 @@ class TestICEQinq(TestCase): self.pmd_output.execute_cmd("vlan set strip on 0") self.check_vlan_offload(vlan_type="strip", stats="on") - self.start_tcpdump(self.tester_iface0) + self.start_tcpdump(self.tg_iface0) out = self.send_pkts_getouput(pkt_list) tcpdump_out = self.get_tcpdump_package() receive_pkt = re.findall("dst=%s" % self.vf1_mac, out) self.verify(len(receive_pkt) == 2, "Failed error received vlan packet!") - tester_pkt = re.findall("vlan \d+", tcpdump_out) - self.verify(len(tester_pkt) == 4, "Failed pass received vlan 
packet!") + tg_pkt = re.findall("vlan \d+", tcpdump_out) + self.verify(len(tg_pkt) == 4, "Failed pass received vlan packet!") self.pmd_output.execute_cmd("vlan set strip off 0") self.check_vlan_offload(vlan_type="strip", stats="off") - self.start_tcpdump(self.tester_iface0) + self.start_tcpdump(self.tg_iface0) out = self.send_pkts_getouput(pkt_list) tcpdump_out = self.get_tcpdump_package() receive_pkt = re.findall("dst=%s" % self.vf1_mac, out) self.verify(len(receive_pkt) == 2, "Failed error received vlan packet!") - tester_pkt = re.findall("vlan \d+", tcpdump_out) - self.verify(len(tester_pkt) == 6, "Failed pass received vlan packet!") + tg_pkt = re.findall("vlan \d+", tcpdump_out) + self.verify(len(tg_pkt) == 6, "Failed pass received vlan packet!") def test_enable_disable_iavf_vlan_insert(self): """ @@ -1110,7 +1110,7 @@ class TestICEQinq(TestCase): % self.vf1_mac, ] if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s off" % (self.pf_interface, self.flag), "# ", ) @@ -1134,7 +1134,7 @@ class TestICEQinq(TestCase): self, pkt, pkt_len=None, vlan_strip=False, crc_strip=False ): if pkt_len: - self.start_tcpdump(self.tester_iface0) + self.start_tcpdump(self.tg_iface0) out = self.send_pkts_getouput(pkt) pkt_length = re.search("length=(\d+)", out).group(1) rx_bytes = re.search("RX-bytes:\s+(\d+)", out).group(1) @@ -1167,7 +1167,7 @@ class TestICEQinq(TestCase): % self.vf1_mac ) if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.pf_interface, self.flag), "# ", ) @@ -1196,7 +1196,7 @@ class TestICEQinq(TestCase): self.pmd_output.execute_cmd("quit", "#") self.launch_testpmd(vfs_num=1) - self.start_tcpdump(self.tester_iface0) + self.start_tcpdump(self.tg_iface0) self._send_pkt_check_vlan_and_crc(pkt, crc_strip=True) def test_CRC_strip_iavf_vlan_strip_coexists(self): @@ -1208,7 +1208,7 @@ class TestICEQinq(TestCase): % self.vf1_mac ) if 
self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.pf_interface, self.flag), "# ", ) @@ -1222,7 +1222,7 @@ class TestICEQinq(TestCase): self.pmd_output.execute_cmd("rx_vlan add 1 0") self.pmd_output.execute_cmd("start") - self.start_tcpdump(self.tester_iface0) + self.start_tcpdump(self.tg_iface0) out = self.send_pkts_getouput(pkt) tcpdump_out = self.get_tcpdump_package() pkt_len = re.search("length=(\d+)", out).group(1) @@ -1261,12 +1261,12 @@ class TestICEQinq(TestCase): def tear_down(self): self.pmd_output.execute_cmd("quit", "#") - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_iavf() def tear_down_all(self): if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.pf_interface, self.flag, self.default_stats), "# ", diff --git a/tests/TestSuite_ice_rss_configure.py b/tests/TestSuite_ice_rss_configure.py index 28821269..f583ae2a 100644 --- a/tests/TestSuite_ice_rss_configure.py +++ b/tests/TestSuite_ice_rss_configure.py @@ -4,7 +4,7 @@ import time -from framework import packet +from framework import scapy_packet_builder from framework.pmd_output import PmdOutput from framework.test_case import TestCase @@ -287,18 +287,18 @@ class RSSConfigureTest(TestCase): Run at the start of each test suite. 
Generic filter Prerequistites """ - self.dut_ports = self.dut.get_ports(self.nic) - self.dut.bind_interfaces_linux(self.drivername) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.sut_node.bind_interfaces_linux(self.drivername) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") # self.cores = "1S/8C/1T" - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.__tx_iface = self.tester.get_interface(localPort) - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf_mac = self.dut.get_mac_address(0) - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.__tx_iface = self.tg_node.get_interface(localPort) + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf_mac = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] self.verify( self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"], "%s nic not support ethertype filter" % self.nic, @@ -367,4 +367,4 @@ class RSSConfigureTest(TestCase): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_switch_filter.py b/tests/TestSuite_ice_switch_filter.py index c6dcd829..748cea0e 100644 --- a/tests/TestSuite_ice_switch_filter.py +++ b/tests/TestSuite_ice_switch_filter.py @@ -12,8 +12,8 @@ from collections import OrderedDict from itertools import groupby import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, skip_unsupported_pkg from framework.utils import BLUE, GREEN, RED @@ -4220,7 +4220,7 @@ class ICESwitchFilterTest(TestCase): # modprobe vfio driver if driver == "vfio-pci": for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "vfio-pci": netdev.bind_driver(driver="vfio-pci") @@ -4228,13 +4228,13 @@ class ICESwitchFilterTest(TestCase): elif driver == "igb_uio": # igb_uio should insmod as default, no need to check for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "igb_uio": netdev.bind_driver(driver="igb_uio") else: for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver_now = netdev.get_nic_driver() if driver == "": driver = netdev.default_driver @@ -4249,21 +4249,21 @@ class ICESwitchFilterTest(TestCase): self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"], "%s nic not support Intel® Ethernet 800 Series switch filter" % self.nic, ) - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.__tx_iface = 
self.tester.get_interface(localPort) - self.dut.send_expect("ifconfig %s up" % self.__tx_iface, "# ") - self.pkt = Packet() - self.pmd = PmdOutput(self.dut) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.__tx_iface = self.tg_node.get_interface(localPort) + self.sut_node.send_expect("ifconfig %s up" % self.__tx_iface, "# ") + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd = PmdOutput(self.sut_node) self.generate_file_with_fdir_rules() - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] def reload_ice(self): - self.dut.send_expect("rmmod ice", "# ", 15) - self.dut.send_expect("modprobe ice", "# ", 15) + self.sut_node.send_expect("rmmod ice", "# ", 15) + self.sut_node.send_expect("modprobe ice", "# ", 15) def set_up(self): """ @@ -4275,11 +4275,11 @@ class ICESwitchFilterTest(TestCase): """ generate file with fdir rules to make fdir table full, then test switch filter """ - pf_pci = self.dut.ports_info[0]["pci"] + pf_pci = self.sut_node.ports_info[0]["pci"] out = self.pmd.start_testpmd( "default", eal_param="-a %s --log-level=ice,7" % pf_pci ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.fdir_rule_number = self.pmd.get_max_rule_number(self, out) src_file = "dep/testpmd_cmds_rte_flow_fdir_rules" flows = open(src_file, mode="w") @@ -4297,8 +4297,8 @@ class ICESwitchFilterTest(TestCase): if rule_count > self.fdir_rule_number: break flows.close() - self.dut_file_dir = "/tmp" - self.dut.session.copy_file_to(src_file, self.dut_file_dir) + self.sut_file_dir = "/tmp" + self.sut_node.session.copy_file_to(src_file, self.sut_file_dir) self.fdir_file = "/tmp/testpmd_cmds_rte_flow_fdir_rules" def create_testpmd_command(self): @@ -4306,11 +4306,11 @@ class ICESwitchFilterTest(TestCase): Create testpmd command for non-pipeline mode """ # Prepare testpmd EAL and parameters - all_eal_param = 
self.dut.create_eal_parameters(cores="1S/4C/1T", ports=[0]) + all_eal_param = self.sut_node.create_eal_parameters(cores="1S/4C/1T", ports=[0]) command = ( self.path + all_eal_param + ' --log-level="ice,8" -- -i --rxq=16 --txq=16 ' ) - # command = "./%s/app/testpmd %s --log-level=\"ice,8\" -- -i %s" % (self.dut.target, all_eal_param, "--rxq=16 --txq=16") + # command = "./%s/app/testpmd %s --log-level=\"ice,8\" -- -i %s" % (self.sut_node.target, all_eal_param, "--rxq=16 --txq=16") return command def create_testpmd_command_pipeline_mode(self): @@ -4318,7 +4318,7 @@ class ICESwitchFilterTest(TestCase): Create testpmd command for pipeline mode """ # Prepare testpmd EAL and parameters - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[0], port_options={0: "pipeline-mode-support=1"} ) command = ( @@ -4334,16 +4334,16 @@ class ICESwitchFilterTest(TestCase): command = self.create_testpmd_command() else: command = self.create_testpmd_command_pipeline_mode() - out = self.dut.send_expect(command, "testpmd> ", 300) - self.dut.send_expect("port config all rss all", "testpmd> ", 15) - self.dut.send_expect( + out = self.sut_node.send_expect(command, "testpmd> ", 300) + self.sut_node.send_expect("port config all rss all", "testpmd> ", 15) + self.sut_node.send_expect( "port config 0 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd", "testpmd> ", 15, ) - self.dut.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) def send_and_check_packets(self, dic, port): """ @@ -4351,13 +4351,13 @@ class ICESwitchFilterTest(TestCase): 
""" # Specify the port to use dic["check_func"]["param"]["expect_port"] = port - self.dut.send_expect("start", "testpmd> ", 15) + self.sut_node.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - self.pkt.update_pkt(dic["scapy_str"]) - self.pkt.send_pkt(self.tester, tx_port=self.__tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(dic["scapy_str"]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.__tx_iface, count=1, timeout=370) time.sleep(3) - out = self.dut.send_expect("stop", "testpmd> ", 15) + out = self.sut_node.send_expect("stop", "testpmd> ", 15) result_flag, log_msg = dic["check_func"]["func"]( out, dic["check_func"]["param"], dic["expect_results"] ) @@ -4367,12 +4367,12 @@ class ICESwitchFilterTest(TestCase): """ general packets processing workflow. """ - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # send packets - self.pkt.update_pkt(packets_list) - self.pkt.send_pkt(self.tester, tx_port=self.__tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(packets_list) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.__tx_iface, count=1, timeout=370) time.sleep(3) - out = self.dut.send_expect("stop", "testpmd> ", 15) + out = self.sut_node.send_expect("stop", "testpmd> ", 15) p = re.compile(r"Forward Stats for RX Port= \d+/Queue=(\s?\d+)") res = p.findall(out) default_queue = [int(i) for i in res] @@ -4416,7 +4416,7 @@ class ICESwitchFilterTest(TestCase): rule_list = [] if isinstance(rte_flow_pattern, list): for rule in rte_flow_pattern: - out = self.dut.send_expect(rule, "testpmd> ") # create a rule + out = self.sut_node.send_expect(rule, "testpmd> ") # create a rule if s not in out: rule_list.append(False) else: @@ -4426,7 +4426,7 @@ class ICESwitchFilterTest(TestCase): else: rule_list.append(False) elif isinstance(rte_flow_pattern, str): - out = self.dut.send_expect(rte_flow_pattern, "testpmd> ") # create a rule + out = 
self.sut_node.send_expect(rte_flow_pattern, "testpmd> ") # create a rule if s not in out: rule_list.append(False) else: @@ -4464,7 +4464,7 @@ class ICESwitchFilterTest(TestCase): for i in rte_flow_pattern: length = len(i) rule_rep = i[0:5] + "validate" + i[11:length] - out = self.dut.send_expect(rule_rep, "testpmd> ") # validate a rule + out = self.sut_node.send_expect(rule_rep, "testpmd> ") # validate a rule if (p in out) and ("Failed" not in out): rule_list.append(True) else: @@ -4472,7 +4472,7 @@ class ICESwitchFilterTest(TestCase): elif isinstance(rte_flow_pattern, str): length = len(rte_flow_pattern) rule_rep = rte_flow_pattern[0:5] + "validate" + rte_flow_pattern[11:length] - out = self.dut.send_expect(rule_rep, "testpmd> ") # validate a rule + out = self.sut_node.send_expect(rule_rep, "testpmd> ") # validate a rule if (p in out) and ("Failed" not in out): rule_list.append(True) else: @@ -4510,7 +4510,7 @@ class ICESwitchFilterTest(TestCase): """ check the rules in list identical to ones in rule_list """ - out = self.dut.send_expect("flow list %d" % port_id, "testpmd> ", 15) + out = self.sut_node.send_expect("flow list %d" % port_id, "testpmd> ", 15) p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule") m = p.search(out) if not m: @@ -4551,7 +4551,7 @@ class ICESwitchFilterTest(TestCase): destroy_list = [] if isinstance(rule_list, list): for i in rule_list: - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow destroy %s rule %s" % (port_id, i), "testpmd> ", 15 ) m = p.search(out) @@ -4560,7 +4560,7 @@ class ICESwitchFilterTest(TestCase): else: destroy_list.append(False) else: - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow destroy %s rule %s" % (port_id, rule_list), "testpmd> ", 15 ) m = p.search(out) @@ -4703,7 +4703,7 @@ class ICESwitchFilterTest(TestCase): if not result_flag: continue result_flag, overall_result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], [], is_non_pipeline, 
is_need_rss_rule, @@ -4720,7 +4720,7 @@ class ICESwitchFilterTest(TestCase): if not result_flag: continue result_flag, overall_result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], rule_list, is_non_pipeline, is_need_rss_rule, @@ -4733,7 +4733,7 @@ class ICESwitchFilterTest(TestCase): # send matched packets and check matched_dic = tv["matched"] result_flag, log_msg = self.send_and_check_packets( - matched_dic, self.dut_ports[0] + matched_dic, self.sut_ports[0] ) overall_result = self.save_results( pattern_name, "matched packets", result_flag, log_msg, overall_result @@ -4742,7 +4742,7 @@ class ICESwitchFilterTest(TestCase): mismatched_dic = tv["mismatched"] if len(list(mismatched_dic.keys())) != 0: result_flag, log_msg = self.send_and_check_packets( - mismatched_dic, self.dut_ports[0] + mismatched_dic, self.sut_ports[0] ) overall_result = self.save_results( pattern_name, "mismatched", result_flag, log_msg, overall_result @@ -4754,7 +4754,7 @@ class ICESwitchFilterTest(TestCase): if not result_flag: continue result_flag, overall_result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], [], is_non_pipeline, is_need_rss_rule, @@ -4770,7 +4770,7 @@ class ICESwitchFilterTest(TestCase): "func" ] result_flag, log_msg = self.send_and_check_packets( - check_destroy_dict, self.dut_ports[0] + check_destroy_dict, self.sut_ports[0] ) overall_result = self.save_results( pattern_name, @@ -4986,7 +4986,7 @@ class ICESwitchFilterTest(TestCase): "all rules should validate failed, result %s" % rule_list, ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -5000,7 +5000,7 @@ class ICESwitchFilterTest(TestCase): not any(rule_list), "all rules should create failed, result %s" % rule_list ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, 
check_stats=False, @@ -5023,11 +5023,11 @@ class ICESwitchFilterTest(TestCase): all(rule1), "all rules should create successed, result {}".format(rule1) ) result = self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(result, "expect rule create successed") self.destroy_switch_filter_rule( - port_id=self.dut_ports[0], rule_list=rule1, check_stats=False + port_id=self.sut_ports[0], rule_list=rule1, check_stats=False ) rule2 = self.create_switch_filter_rule( rte_flow_pattern=rule_list[1], check_stats=False @@ -5036,7 +5036,7 @@ class ICESwitchFilterTest(TestCase): all(rule2), "all rules should create successed, result {}".format(rule2) ) result = self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(result, "expect rule create successed") @@ -5052,7 +5052,7 @@ class ICESwitchFilterTest(TestCase): all(rule1), "all rules should create successed, result {}".format(rule1) ) result = self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(result, "expect rule create successed") matched_dic = { @@ -5063,18 +5063,18 @@ class ICESwitchFilterTest(TestCase): }, "expect_results": {"expect_pkts": 0}, } - self.pmd.wait_link_status_up(self.dut_ports[0]) + self.pmd.wait_link_status_up(self.sut_ports[0]) result_flag, log_msg = self.send_and_check_packets( - matched_dic, self.dut_ports[0] + matched_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be drop, result {}".format(log_msg) ) self.destroy_switch_filter_rule( - port_id=self.dut_ports[0], rule_list=rule1, check_stats=False + port_id=self.sut_ports[0], rule_list=rule1, check_stats=False ) result = 
self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(not result, "expect rule destroy successed") destroy_rule_dic = { @@ -5086,7 +5086,7 @@ class ICESwitchFilterTest(TestCase): "expect_results": {"expect_pkts": len(drop_any_pkt_list)}, } result_flag, log_msg = self.send_and_check_packets( - destroy_rule_dic, self.dut_ports[0] + destroy_rule_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be received, result {}".format(log_msg) @@ -5104,7 +5104,7 @@ class ICESwitchFilterTest(TestCase): all(rule1), "all rules should create successed, result {}".format(rule1) ) result = self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(result, "expect rule create successed") matched_dic = { @@ -5115,19 +5115,19 @@ class ICESwitchFilterTest(TestCase): }, "expect_results": {"expect_pkts": len(drop_any_pkt_list)}, } - self.pmd.wait_link_status_up(self.dut_ports[0]) + self.pmd.wait_link_status_up(self.sut_ports[0]) result_flag, log_msg = self.send_and_check_packets( - matched_dic, self.dut_ports[0] + matched_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be received by queue 4, result {}".format(log_msg), ) self.destroy_switch_filter_rule( - port_id=self.dut_ports[0], rule_list=rule1, check_stats=False + port_id=self.sut_ports[0], rule_list=rule1, check_stats=False ) result = self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(not result, "expect rule destroy successed") destroy_rule_dic = { @@ -5139,7 +5139,7 @@ class ICESwitchFilterTest(TestCase): "expect_results": {"expect_pkts": len(drop_any_pkt_list)}, } result_flag, 
log_msg = self.send_and_check_packets( - destroy_rule_dic, self.dut_ports[0] + destroy_rule_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be receive, result {}".format(log_msg) @@ -5159,7 +5159,7 @@ class ICESwitchFilterTest(TestCase): all(rule1), "all rules should create successed, result {}".format(rule1) ) result = self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(result, "expect rule create successed") subcase1_drop_dic = { @@ -5170,15 +5170,15 @@ class ICESwitchFilterTest(TestCase): }, "expect_results": {"expect_pkts": 0}, } - self.pmd.wait_link_status_up(self.dut_ports[0]) + self.pmd.wait_link_status_up(self.sut_ports[0]) result_flag, log_msg = self.send_and_check_packets( - subcase1_drop_dic, self.dut_ports[0] + subcase1_drop_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be dropped, result {}".format(log_msg) ) self.destroy_switch_filter_rule( - port_id=self.dut_ports[0], rule_list=rule1[0], check_stats=False + port_id=self.sut_ports[0], rule_list=rule1[0], check_stats=False ) subcase1_queue_4_dic = { "scapy_str": drop_any_pkt_list, @@ -5189,14 +5189,14 @@ class ICESwitchFilterTest(TestCase): "expect_results": {"expect_pkts": len(drop_any_pkt_list)}, } result_flag, log_msg = self.send_and_check_packets( - subcase1_queue_4_dic, self.dut_ports[0] + subcase1_queue_4_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be received by queue 4, result {}".format(log_msg), ) self.destroy_switch_filter_rule( - port_id=self.dut_ports[0], rule_list=rule1[1], check_stats=False + port_id=self.sut_ports[0], rule_list=rule1[1], check_stats=False ) subcase1_all_receive_dic = { "scapy_str": drop_any_pkt_list, @@ -5207,14 +5207,14 @@ class ICESwitchFilterTest(TestCase): "expect_results": {"expect_pkts": len(drop_any_pkt_list)}, } result_flag, log_msg = 
self.send_and_check_packets( - subcase1_all_receive_dic, self.dut_ports[0] + subcase1_all_receive_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be received, result {}".format(log_msg) ) # change the rule priority - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) rule2_list = [ "flow create 0 priority 0 ingress pattern any / end actions queue index 4 / end", "flow create 0 priority 1 ingress pattern any / end actions drop / end", @@ -5226,35 +5226,35 @@ class ICESwitchFilterTest(TestCase): all(rule2), "all rules should create successed, result {}".format(rule2) ) result = self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(result, "expect rule create successed") - self.pmd.wait_link_status_up(self.dut_ports[0]) + self.pmd.wait_link_status_up(self.sut_ports[0]) result_flag, log_msg = self.send_and_check_packets( - subcase1_drop_dic, self.dut_ports[0] + subcase1_drop_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be dropped, result {}".format(log_msg) ) self.destroy_switch_filter_rule( - port_id=self.dut_ports[0], rule_list=rule2[0], check_stats=False + port_id=self.sut_ports[0], rule_list=rule2[0], check_stats=False ) result_flag, log_msg = self.send_and_check_packets( - subcase1_drop_dic, self.dut_ports[0] + subcase1_drop_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be dropped, result {}".format(log_msg) ) self.destroy_switch_filter_rule( - port_id=self.dut_ports[0], rule_list=rule2[1], check_stats=False + port_id=self.sut_ports[0], rule_list=rule2[1], check_stats=False ) result_flag, log_msg = self.send_and_check_packets( - subcase1_all_receive_dic, self.dut_ports[0] + subcase1_all_receive_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be received, result 
{}".format(log_msg) ) - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) self.logger.info( "***********subcase 2: non-pipeline mode with other rule***********" @@ -5273,10 +5273,10 @@ class ICESwitchFilterTest(TestCase): all(rule3), "all rules should create successed, result {}".format(rule3) ) result = self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(result, "expect rule create successed") - self.pmd.wait_link_status_up(self.dut_ports[0]) + self.pmd.wait_link_status_up(self.sut_ports[0]) subcase2_drop_dic = { "scapy_str": pkts, "check_func": { @@ -5286,13 +5286,13 @@ class ICESwitchFilterTest(TestCase): "expect_results": {"expect_pkts": 0}, } result_flag, log_msg = self.send_and_check_packets( - subcase2_drop_dic, self.dut_ports[0] + subcase2_drop_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be dropped, result {}".format(log_msg) ) self.destroy_switch_filter_rule( - port_id=self.dut_ports[0], rule_list=rule3[0], check_stats=False + port_id=self.sut_ports[0], rule_list=rule3[0], check_stats=False ) subcase2_queue_2_dic = { "scapy_str": pkts, @@ -5303,13 +5303,13 @@ class ICESwitchFilterTest(TestCase): "expect_results": {"expect_pkts": len(pkts)}, } result_flag, log_msg = self.send_and_check_packets( - subcase2_queue_2_dic, self.dut_ports[0] + subcase2_queue_2_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be received by queue 2, result {}".format(log_msg), ) - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) rule4_list = [ "flow create 0 priority 1 ingress pattern any / end actions queue index 4 / end", "flow create 0 priority 0 ingress pattern eth / ipv4 src is 1.1.1.2 dst is 1.1.1.3 tos is 4 / udp src is 23 dst is 25 / end actions queue index 
2 / end", @@ -5324,19 +5324,19 @@ class ICESwitchFilterTest(TestCase): all(rule4), "all rules should create successed, result {}".format(rule4) ) result = self.check_switch_filter_rule_list( - port_id=self.dut_ports[0], is_need_rss_rule=False, check_stats=False + port_id=self.sut_ports[0], is_need_rss_rule=False, check_stats=False ) self.verify(result, "expect rule create successed") - self.pmd.wait_link_status_up(self.dut_ports[0]) + self.pmd.wait_link_status_up(self.sut_ports[0]) result_flag, log_msg = self.send_and_check_packets( - subcase2_queue_2_dic, self.dut_ports[0] + subcase2_queue_2_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be received by queue 2, result {}".format(log_msg), ) self.destroy_switch_filter_rule( - port_id=self.dut_ports[0], rule_list=rule4[0], check_stats=False + port_id=self.sut_ports[0], rule_list=rule4[0], check_stats=False ) subcase2_queue_4_dic = { "scapy_str": pkts, @@ -5347,16 +5347,16 @@ class ICESwitchFilterTest(TestCase): "expect_results": {"expect_pkts": len(pkts)}, } result_flag, log_msg = self.send_and_check_packets( - subcase2_queue_4_dic, self.dut_ports[0] + subcase2_queue_4_dic, self.sut_ports[0] ) self.verify( result_flag, "expect all pkts can be received by queue 2, result {}".format(log_msg), ) - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) self.logger.info("***********subcase 3: pipeline mode***********") - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") self.launch_testpmd(is_non_pipeline=False) rule5_list = [ "flow create 0 priority 0 ingress pattern any / end actions drop / end", @@ -5370,7 +5370,7 @@ class ICESwitchFilterTest(TestCase): self.verify( not all(rule5), "all rules should create failed, result {}".format(rule5) ) - self.dut.send_expect("flow flush 0", "testpmd> ", 15) + self.sut_node.send_expect("flow flush 0", "testpmd> ", 15) rule6 = self.create_switch_filter_rule( 
rte_flow_pattern=rule5_list[2:], check_stats=False ) @@ -5386,11 +5386,11 @@ class ICESwitchFilterTest(TestCase): if self.running_case == "test_unsupported_pattern_in_OS_default_package": pass else: - self.dut.send_expect("flow flush %d" % self.dut_ports[0], "testpmd> ", 15) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("flow flush %d" % self.sut_ports[0], "testpmd> ", 15) + self.sut_node.send_expect("quit", "#") def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_switch_filter_pppoe.py b/tests/TestSuite_ice_switch_filter_pppoe.py index 797029b3..60416085 100644 --- a/tests/TestSuite_ice_switch_filter_pppoe.py +++ b/tests/TestSuite_ice_switch_filter_pppoe.py @@ -12,8 +12,8 @@ from collections import OrderedDict from itertools import groupby import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, skip_unsupported_pkg from framework.utils import BLUE, GREEN, RED @@ -2828,7 +2828,7 @@ class ICESwitchFilterPPPOETest(TestCase): # modprobe vfio driver if driver == "vfio-pci": for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "vfio-pci": netdev.bind_driver(driver="vfio-pci") @@ -2836,13 +2836,13 @@ class ICESwitchFilterPPPOETest(TestCase): elif driver == "igb_uio": # igb_uio should insmod as default, no need to check for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver = netdev.get_nic_driver() if driver != "igb_uio": netdev.bind_driver(driver="igb_uio") else: for port in ports: - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] driver_now = netdev.get_nic_driver() if driver == "": 
driver = netdev.default_driver @@ -2858,17 +2858,17 @@ class ICESwitchFilterPPPOETest(TestCase): self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"], "%s nic not support Intel® Ethernet 800 Series switch filter" % self.nic, ) - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.__tx_iface = self.tester.get_interface(localPort) - self.dut.send_expect("ifconfig %s up" % self.__tx_iface, "# ") - self.pkt = Packet() - self.pmd = PmdOutput(self.dut) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.__tx_iface = self.tg_node.get_interface(localPort) + self.sut_node.send_expect("ifconfig %s up" % self.__tx_iface, "# ") + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd = PmdOutput(self.sut_node) self.generate_file_with_fdir_rules() - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ @@ -2877,18 +2877,18 @@ class ICESwitchFilterPPPOETest(TestCase): self.reload_ice() def reload_ice(self): - self.dut.send_expect("rmmod ice", "# ", 15) - self.dut.send_expect("modprobe ice", "# ", 15) + self.sut_node.send_expect("rmmod ice", "# ", 15) + self.sut_node.send_expect("modprobe ice", "# ", 15) def generate_file_with_fdir_rules(self): """ generate file with fdir rules to make fdir table full, then test switch filter """ - pf_pci = self.dut.ports_info[0]["pci"] + pf_pci = self.sut_node.ports_info[0]["pci"] out = self.pmd.start_testpmd( "default", eal_param="-a %s --log-level=ice,7" % pf_pci ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.fdir_rule_number = self.pmd.get_max_rule_number(self, out) src_file = "dep/testpmd_cmds_rte_flow_fdir_rules" flows = open(src_file, mode="w") 
@@ -2906,8 +2906,8 @@ class ICESwitchFilterPPPOETest(TestCase): if rule_count > self.fdir_rule_number: break flows.close() - self.dut_file_dir = "/tmp" - self.dut.session.copy_file_to(src_file, self.dut_file_dir) + self.sut_file_dir = "/tmp" + self.sut_node.session.copy_file_to(src_file, self.sut_file_dir) self.fdir_file = "/tmp/testpmd_cmds_rte_flow_fdir_rules" def create_testpmd_command(self): @@ -2915,11 +2915,11 @@ class ICESwitchFilterPPPOETest(TestCase): Create testpmd command for non-pipeline mode """ # Prepare testpmd EAL and parameters - all_eal_param = self.dut.create_eal_parameters(cores="1S/4C/1T", ports=[0]) + all_eal_param = self.sut_node.create_eal_parameters(cores="1S/4C/1T", ports=[0]) command = ( self.path + all_eal_param + ' --log-level="ice,8" -- -i --rxq=16 --txq=16 ' ) - # command = "./%s/app/testpmd %s --log-level=\"ice,8\" -- -i %s" % (self.dut.target, all_eal_param, "--rxq=16 --txq=16") + # command = "./%s/app/testpmd %s --log-level=\"ice,8\" -- -i %s" % (self.sut_node.target, all_eal_param, "--rxq=16 --txq=16") return command def create_testpmd_command_pipeline_mode(self): @@ -2927,7 +2927,7 @@ class ICESwitchFilterPPPOETest(TestCase): Create testpmd command for pipeline mode """ # Prepare testpmd EAL and parameters - all_eal_param = self.dut.create_eal_parameters( + all_eal_param = self.sut_node.create_eal_parameters( cores="1S/4C/1T", ports=[0], port_options={0: "pipeline-mode-support=1"} ) command = ( @@ -2943,16 +2943,16 @@ class ICESwitchFilterPPPOETest(TestCase): command = self.create_testpmd_command() else: command = self.create_testpmd_command_pipeline_mode() - out = self.dut.send_expect(command, "testpmd> ", 300) - self.dut.send_expect("port config all rss all", "testpmd> ", 15) - self.dut.send_expect( + out = self.sut_node.send_expect(command, "testpmd> ", 300) + self.sut_node.send_expect("port config all rss all", "testpmd> ", 15) + self.sut_node.send_expect( "port config 0 rss-hash-key ipv4 
1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd", "testpmd> ", 15, ) - self.dut.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 15) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) def send_and_check_packets(self, dic, port): """ @@ -2960,13 +2960,13 @@ class ICESwitchFilterPPPOETest(TestCase): """ # Specify the port to use dic["check_func"]["param"]["expect_port"] = port - self.dut.send_expect("start", "testpmd> ", 15) + self.sut_node.send_expect("start", "testpmd> ", 15) time.sleep(2) # send packets - self.pkt.update_pkt(dic["scapy_str"]) - self.pkt.send_pkt(self.tester, tx_port=self.__tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(dic["scapy_str"]) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.__tx_iface, count=1, timeout=370) time.sleep(3) - out = self.dut.send_expect("stop", "testpmd> ", 15) + out = self.sut_node.send_expect("stop", "testpmd> ", 15) result_flag, log_msg = dic["check_func"]["func"]( out, dic["check_func"]["param"], dic["expect_results"] ) @@ -2976,12 +2976,12 @@ class ICESwitchFilterPPPOETest(TestCase): """ general packets processing workflow. 
""" - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # send packets - self.pkt.update_pkt(packets_list) - self.pkt.send_pkt(self.tester, tx_port=self.__tx_iface, count=1, timeout=370) + self.scapy_pkt_builder.update_pkt(packets_list) + self.scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.__tx_iface, count=1, timeout=370) time.sleep(3) - out = self.dut.send_expect("stop", "testpmd> ", 15) + out = self.sut_node.send_expect("stop", "testpmd> ", 15) p = re.compile(r"Forward Stats for RX Port= \d+/Queue=(\s?\d+)") res = p.findall(out) default_queue = [int(i) for i in res] @@ -3025,7 +3025,7 @@ class ICESwitchFilterPPPOETest(TestCase): rule_list = [] if isinstance(rte_flow_pattern, list): for rule in rte_flow_pattern: - out = self.dut.send_expect(rule, "testpmd> ") # create a rule + out = self.sut_node.send_expect(rule, "testpmd> ") # create a rule if s not in out: rule_list.append(False) else: @@ -3035,7 +3035,7 @@ class ICESwitchFilterPPPOETest(TestCase): else: rule_list.append(False) elif isinstance(rte_flow_pattern, str): - out = self.dut.send_expect(rte_flow_pattern, "testpmd> ") # create a rule + out = self.sut_node.send_expect(rte_flow_pattern, "testpmd> ") # create a rule if s not in out: rule_list.append(False) else: @@ -3073,7 +3073,7 @@ class ICESwitchFilterPPPOETest(TestCase): for i in rte_flow_pattern: length = len(i) rule_rep = i[0:5] + "validate" + i[11:length] - out = self.dut.send_expect(rule_rep, "testpmd> ") # validate a rule + out = self.sut_node.send_expect(rule_rep, "testpmd> ") # validate a rule if (p in out) and ("Failed" not in out): rule_list.append(True) else: @@ -3081,7 +3081,7 @@ class ICESwitchFilterPPPOETest(TestCase): elif isinstance(rte_flow_pattern, str): length = len(rte_flow_pattern) rule_rep = rte_flow_pattern[0:5] + "validate" + rte_flow_pattern[11:length] - out = self.dut.send_expect(rule_rep, "testpmd> ") # validate a rule + out = self.sut_node.send_expect(rule_rep, "testpmd> ") # 
validate a rule if (p in out) and ("Failed" not in out): rule_list.append(True) else: @@ -3119,7 +3119,7 @@ class ICESwitchFilterPPPOETest(TestCase): """ check the rules in list identical to ones in rule_list """ - out = self.dut.send_expect("flow list %d" % port_id, "testpmd> ", 15) + out = self.sut_node.send_expect("flow list %d" % port_id, "testpmd> ", 15) p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule") m = p.search(out) if not m: @@ -3160,7 +3160,7 @@ class ICESwitchFilterPPPOETest(TestCase): destroy_list = [] if isinstance(rule_list, list): for i in rule_list: - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow destroy %s rule %s" % (port_id, i), "testpmd> ", 15 ) m = p.search(out) @@ -3169,7 +3169,7 @@ class ICESwitchFilterPPPOETest(TestCase): else: destroy_list.append(False) else: - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow destroy %s rule %s" % (port_id, rule_list), "testpmd> ", 15 ) m = p.search(out) @@ -3312,7 +3312,7 @@ class ICESwitchFilterPPPOETest(TestCase): if not result_flag: continue result_flag, overall_result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], [], is_non_pipeline, is_need_rss_rule, @@ -3329,7 +3329,7 @@ class ICESwitchFilterPPPOETest(TestCase): if not result_flag: continue result_flag, overall_result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], rule_list, is_non_pipeline, is_need_rss_rule, @@ -3342,7 +3342,7 @@ class ICESwitchFilterPPPOETest(TestCase): # send matched packets and check matched_dic = tv["matched"] result_flag, log_msg = self.send_and_check_packets( - matched_dic, self.dut_ports[0] + matched_dic, self.sut_ports[0] ) overall_result = self.save_results( pattern_name, "matched packets", result_flag, log_msg, overall_result @@ -3351,7 +3351,7 @@ class ICESwitchFilterPPPOETest(TestCase): mismatched_dic = tv["mismatched"] if len(list(mismatched_dic.keys())) != 0: result_flag, log_msg = 
self.send_and_check_packets( - mismatched_dic, self.dut_ports[0] + mismatched_dic, self.sut_ports[0] ) overall_result = self.save_results( pattern_name, "mismatched", result_flag, log_msg, overall_result @@ -3363,7 +3363,7 @@ class ICESwitchFilterPPPOETest(TestCase): if not result_flag: continue result_flag, overall_result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], [], is_non_pipeline, is_need_rss_rule, @@ -3379,7 +3379,7 @@ class ICESwitchFilterPPPOETest(TestCase): "func" ] result_flag, log_msg = self.send_and_check_packets( - check_destroy_dict, self.dut_ports[0] + check_destroy_dict, self.sut_ports[0] ) overall_result = self.save_results( pattern_name, @@ -3404,7 +3404,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(False) # create a pppoe rss rule to make the pppoe packets have hash value, and queue group action work - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3417,7 +3417,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(True) # create a pppoe rss rule to make the pppoe packets have hash value, and queue group action work - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3517,7 +3517,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(True) # create a pppoe rss rule to make the pppoe control have hash value, and queue group action work - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3528,7 +3528,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(True) # create a pppoe rss rule to make the pppoe 
control have hash value, and queue group action work - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3539,7 +3539,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(True) # create a pppoe rss rule to make the pppoe control have hash value, and queue group action work - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3550,7 +3550,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(True) # create a pppoe rss rule to make the pppoe control have hash value, and queue group action work - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3886,7 +3886,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(False) # create a pppoe rss rule to make the pppoe control have hash value, and queue group action work - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3904,7 +3904,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(False) # create a pppoe rss rule to make the pppoe control have hash value, and queue group action work - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3924,7 +3924,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(False) # create a pppoe rss rule to make the pppoe control have hash value, and queue group action work - self.dut.send_expect( + 
self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3944,7 +3944,7 @@ class ICESwitchFilterPPPOETest(TestCase): # launch testpmd self.launch_testpmd(False) # create a pppoe rss rule to make the pppoe control have hash value, and queue group action work - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / pppoes / end actions rss types pppoe end key_len 0 queues end / end", "testpmd> ", 15, @@ -3986,7 +3986,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should validate failed, result %s" % rule_list, ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4002,7 +4002,7 @@ class ICESwitchFilterPPPOETest(TestCase): not any(rule_list), "all rules should create failed, result %s" % rule_list ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4021,7 +4021,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should validate failed, result %s" % rule_list, ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4037,7 +4037,7 @@ class ICESwitchFilterPPPOETest(TestCase): not any(rule_list), "all rules should create failed, result %s" % rule_list ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4056,7 +4056,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should validate failed, result %s" % rule_list, ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4072,7 +4072,7 @@ class 
ICESwitchFilterPPPOETest(TestCase): not any(rule_list), "all rules should create failed, result %s" % rule_list ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4087,7 +4087,7 @@ class ICESwitchFilterPPPOETest(TestCase): rule_list = self.create_switch_filter_rule(rule, check_stats=False) self.verify(all(rule_list), "some rules create failed, result %s" % rule_list) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4102,7 +4102,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should create failed, result %s" % rule_list_dupli, ) result_dupli = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4112,7 +4112,7 @@ class ICESwitchFilterPPPOETest(TestCase): "the rule list is not the same. 
expect %s, result %s" % (rule_list, result_dupli), ) - self.dut.send_expect("flow destroy 0 rule %s" % rule_list[0], "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule %s" % rule_list[0], "testpmd> ", 15) # conflicted rules rule = "flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / end actions queue index 1 / end" @@ -4122,7 +4122,7 @@ class ICESwitchFilterPPPOETest(TestCase): "some rules create failed, result %s, rule %s" % (rule_list, rule), ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4144,7 +4144,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should create failed, result %s" % rule_list2, ) result1 = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4154,7 +4154,7 @@ class ICESwitchFilterPPPOETest(TestCase): "the rule list is not the same. 
expect %s, result %s" % (rule_list, result1), ) - self.dut.send_expect("flow destroy 0 rule %s" % rule_list[0], "testpmd> ", 15) + self.sut_node.send_expect("flow destroy 0 rule %s" % rule_list[0], "testpmd> ", 15) # multiple actions rule_list = self.validate_switch_filter_rule( @@ -4165,7 +4165,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should validate failed, result %s" % rule_list, ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4181,7 +4181,7 @@ class ICESwitchFilterPPPOETest(TestCase): not any(rule_list), "all rules should create failed, result %s" % rule_list ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4200,7 +4200,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should validate failed, result %s" % rule_list, ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4216,7 +4216,7 @@ class ICESwitchFilterPPPOETest(TestCase): not any(rule_list), "all rules should create failed, result %s" % rule_list ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4228,7 +4228,7 @@ class ICESwitchFilterPPPOETest(TestCase): # delete a non-existing rule result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4237,7 +4237,7 @@ class ICESwitchFilterPPPOETest(TestCase): result == [], "the rule list is not the same. 
expect %s, result %s" % ([], result), ) - out = self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ", 15) + out = self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd> ", 15) self.verify("error" not in out, "It should be no error message.") # add long switch rule @@ -4248,7 +4248,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should validate failed, result %s" % rule_list, ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4262,7 +4262,7 @@ class ICESwitchFilterPPPOETest(TestCase): not any(rule_list), "all rules should create failed, result %s" % rule_list ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4291,7 +4291,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should validate failed, result %s" % rule_list, ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4307,7 +4307,7 @@ class ICESwitchFilterPPPOETest(TestCase): not any(rule_list), "all rules should create failed, result %s" % rule_list ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4326,7 +4326,7 @@ class ICESwitchFilterPPPOETest(TestCase): "all rules should validate failed, result %s" % rule_list, ) result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4336,7 +4336,7 @@ class ICESwitchFilterPPPOETest(TestCase): "the rule list is not the same. 
expect %s, result %s" % ([], result), ) result = self.check_switch_filter_rule_list( - self.dut_ports[1], + self.sut_ports[1], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4353,7 +4353,7 @@ class ICESwitchFilterPPPOETest(TestCase): ) # check there is no rule listed result = self.check_switch_filter_rule_list( - self.dut_ports[0], + self.sut_ports[0], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4363,7 +4363,7 @@ class ICESwitchFilterPPPOETest(TestCase): "the rule list is not the same. expect %s, result %s" % (rule_list, result), ) result = self.check_switch_filter_rule_list( - self.dut_ports[1], + self.sut_ports[1], is_non_pipeline=False, is_need_rss_rule=False, check_stats=False, @@ -4378,11 +4378,11 @@ class ICESwitchFilterPPPOETest(TestCase): Run after each test case. """ # destroy all the rules on port 0 - self.dut.send_expect("flow flush %d" % self.dut_ports[0], "testpmd> ", 300) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("flow flush %d" % self.sut_ports[0], "testpmd> ", 300) + self.sut_node.send_expect("quit", "#") def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ice_vf_support_multicast_address.py b/tests/TestSuite_ice_vf_support_multicast_address.py index cd088ad4..25e5e4c7 100644 --- a/tests/TestSuite_ice_vf_support_multicast_address.py +++ b/tests/TestSuite_ice_vf_support_multicast_address.py @@ -4,8 +4,8 @@ import re -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase vf0_mac = "00:11:22:33:44:55" @@ -20,16 +20,16 @@ class TestICEVfSupportMulticastAdress(TestCase): """ Prerequisite steps for each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") - self.used_dut_port = self.dut_ports[0] - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") + self.used_sut_port = self.sut_ports[0] + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] self.vf_flag = False self.create_iavf() - self.pmd_output = PmdOutput(self.dut) + self.pmd_output = PmdOutput(self.sut_node) - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.tester_itf = self.tester.get_interface(localPort) + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_itf = self.tg_node.get_interface(localPort) def set_up(self): """ @@ -37,17 +37,17 @@ class TestICEVfSupportMulticastAdress(TestCase): """ if self.running_case == "test_maxnum_multicast_address_with_vfs_trust_off": # set two VFs trust off - self.dut.send_expect( + self.sut_node.send_expect( "ip link set dev %s vf 0 trust off" % self.pf_interface, "# " ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set dev %s vf 1 trust off" % self.pf_interface, "# " ) else: - self.dut.send_expect( + self.sut_node.send_expect( "ip link set dev %s vf 0 trust on" % self.pf_interface, "# " ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set dev %s vf 1 trust on" % self.pf_interface, "# " ) self.launch_testpmd() @@ -55,36 +55,36 @@ class TestICEVfSupportMulticastAdress(TestCase): def create_iavf(self): # Generate 2 VFs on PF if self.vf_flag is False: - self.dut.bind_interfaces_linux("ice") + self.sut_node.bind_interfaces_linux("ice") # get priv-flags default stats self.flag = "vf-vlan-pruning" - self.default_stats = self.dut.get_priv_flags_state( + self.default_stats = self.sut_node.get_priv_flags_state( self.pf_interface, self.flag ) if self.default_stats: - 
self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.pf_interface, self.flag), "# ", ) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] self.vf_flag = True try: for port in self.sriov_vfs_port: port.bind_driver(self.drivername) - self.dut.send_expect("ifconfig %s up" % self.pf_interface, "# ") - self.dut.send_expect( + self.sut_node.send_expect("ifconfig %s up" % self.pf_interface, "# ") + self.sut_node.send_expect( "ethtool --set-priv-flags %s vf-true-promisc-support on" % self.pf_interface, "# ", ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf_interface, vf0_mac), "# " ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 1 mac %s" % (self.pf_interface, vf1_mac), "# " ) except Exception as e: @@ -93,7 +93,7 @@ class TestICEVfSupportMulticastAdress(TestCase): def destroy_iavf(self): if self.vf_flag is True: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) self.vf_flag = False def launch_testpmd(self): @@ -175,10 +175,10 @@ class TestICEVfSupportMulticastAdress(TestCase): hex(num_end)[2:], num_end ) pkts.append(pkt_last) - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) def test_one_multicast_address(self): # send 4 packets @@ -199,10 +199,10 @@ class TestICEVfSupportMulticastAdress(TestCase): % vf1_mac ) pkts = [pkt1, pkt2, pkt3, pkt4] - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, 
tx_port=self.tester_itf) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_1 = self.check_pkts_received() self.verify(len(out_1) == 2, "Wrong number of pkts received") self.verify(("0", vf0_mac) in out_1, "pkt3 can't be received by port 0") @@ -212,7 +212,7 @@ class TestICEVfSupportMulticastAdress(TestCase): self.pmd_output.execute_cmd("mcast_addr add 0 %s" % mul_mac_0) self.check_ports_multicast_address_number(1, 0) # send 4 packets - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_2 = self.check_pkts_received() self.verify(len(out_2) == 3, "Wrong number of pkts received") self.verify(("0", vf0_mac) in out_2, "pkt3 can't be received by port 0") @@ -223,7 +223,7 @@ class TestICEVfSupportMulticastAdress(TestCase): self.pmd_output.execute_cmd("mcast_addr remove 0 %s" % mul_mac_0) self.check_ports_multicast_address_number(0, 0) # send 4 packets - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_3 = self.check_pkts_received() self.verify(len(out_3) == 2, "Wrong number of pkts received") self.verify(("0", vf0_mac) in out_3, "pkt3 can't be received by port 0") @@ -253,10 +253,10 @@ class TestICEVfSupportMulticastAdress(TestCase): % vf1_mac ) pkts = [pkt1, pkt2, pkt3, pkt4] - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_1 = self.check_pkts_received() self.verify(len(out_1) == 4, "Wrong number of pkts received") self.verify(("0", vf0_mac) in out_1, "pkt3 can't be received by port 0") @@ -270,7 +270,7 @@ class TestICEVfSupportMulticastAdress(TestCase): self.pmd_output.execute_cmd("mcast_addr remove 0 %s" % mul_mac_0) self.check_ports_multicast_address_number(1, 0) # send 4 packets - 
p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_2 = self.check_pkts_received() self.verify(len(out_2) == 3, "Wrong number of pkts received") self.verify(("0", vf0_mac) in out_2, "pkt3 can't be received by port 0") @@ -290,10 +290,10 @@ class TestICEVfSupportMulticastAdress(TestCase): pkt2 = 'Ether(dst="33:33:00:00:00:02")/IP(src="224.0.0.2")/UDP(sport=22,dport=23)/("X"*480)' pkt3 = 'Ether(dst="33:33:00:00:00:03")/IP(src="224.0.0.3")/UDP(sport=22,dport=23)/("X"*480)' pkts = [pkt1, pkt2, pkt3] - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_1 = self.check_pkts_received() self.verify(len(out_1) == 4, "Wrong number of pkts received") self.verify( @@ -314,7 +314,7 @@ class TestICEVfSupportMulticastAdress(TestCase): self.pmd_output.execute_cmd("mcast_addr remove 1 33:33:00:00:00:03") self.check_ports_multicast_address_number(1, 1) # send 3 packets - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_2 = self.check_pkts_received() self.verify(len(out_2) == 2, "Wrong number of pkts received") self.verify( @@ -562,10 +562,10 @@ class TestICEVfSupportMulticastAdress(TestCase): % vf0_wrong_mac ) pkts = [pkt1, pkt2, pkt3, pkt4, pkt5, pkt6, pkt7] - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) output_1 = self.check_pkts_received() self.verify(len(output_1) == 10, "Wrong number of pkts received") self.verify(("0", vf0_mac) in output_1, "pkt5 can't be received by port 0") @@ -583,7 +583,7 @@ class TestICEVfSupportMulticastAdress(TestCase): 
self.pmd_output.execute_cmd("set promisc all on") self.pmd_output.execute_cmd("set allmulti all off") # send 5 packets - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) output_2 = self.check_pkts_received() self.verify(len(output_2) == 6, "Wrong number of pkts received") self.verify( @@ -601,18 +601,18 @@ class TestICEVfSupportMulticastAdress(TestCase): def test_negative_case(self): # send one packet - p = Packet() - p.append_pkt( + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.append_pkt( 'Ether(dst="33:33:00:00:00:40")/IP(src="224.0.0.1")/UDP(sport=22,dport=23)/("X"*480)' ) - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) output_1 = self.check_pkts_received() self.verify(len(output_1) == 0, "Wrong number of pkts received") # add a multicast address self.pmd_output.execute_cmd("mcast_addr add 0 33:33:00:00:00:40") # send one packet - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) output_2 = self.check_pkts_received() self.verify(len(output_2) == 1, "Wrong number of pkts received") self.verify( @@ -626,7 +626,7 @@ class TestICEVfSupportMulticastAdress(TestCase): "add a same multicast address successfully", ) # send one packet - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) output_3 = self.check_pkts_received() self.verify(len(output_3) == 1, "Wrong number of pkts received") self.verify( @@ -640,7 +640,7 @@ class TestICEVfSupportMulticastAdress(TestCase): "remove nonexistent multicast address successfully", ) # send one packet - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) output_4 = self.check_pkts_received() self.verify(len(output_4) == 1, "Wrong number of pkts received") self.verify( @@ -654,7 +654,7 @@ class 
TestICEVfSupportMulticastAdress(TestCase): "add wrong multicast address successfully", ) # send one packet - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) output_5 = self.check_pkts_received() self.verify(len(output_5) == 1, "Wrong number of pkts received") self.verify( @@ -664,7 +664,7 @@ class TestICEVfSupportMulticastAdress(TestCase): # remove the multicast address self.pmd_output.execute_cmd("mcast_addr remove 0 33:33:00:00:00:40") # send one packet - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) output_6 = self.check_pkts_received() self.verify(len(output_6) == 0, "Wrong number of pkts received") @@ -687,10 +687,10 @@ class TestICEVfSupportMulticastAdress(TestCase): % mul_mac_1 ) pkts = [pkt1, pkt2, pkt3, pkt4] - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_1 = self.check_pkts_received() self.verify(len(out_1) == 0, "pkt1-4 can be received by any port") @@ -698,7 +698,7 @@ class TestICEVfSupportMulticastAdress(TestCase): self.pmd_output.execute_cmd("mcast_addr add 0 %s" % mul_mac_0) self.check_ports_multicast_address_number(1, 0) # send 4 packets - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_2 = self.check_pkts_received() self.verify(len(out_2) == 1, "Wrong number of pkts received") self.verify(("0", mul_mac_0) in out_2, "pkt1 can't be received by port 0") @@ -707,7 +707,7 @@ class TestICEVfSupportMulticastAdress(TestCase): self.pmd_output.execute_cmd("vlan set filter on 0") self.pmd_output.execute_cmd("rx_vlan add 1 0") # send 4 packets - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_3 = 
self.check_pkts_received() self.verify(len(out_3) == 2, "Wrong number of pkts received") self.verify(("0", mul_mac_0) in out_3, "pkt1-2 can't be received by port 0") @@ -718,7 +718,7 @@ class TestICEVfSupportMulticastAdress(TestCase): # remove the vlan filter self.pmd_output.execute_cmd("rx_vlan rm 1 0") # send 4 packets - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_4 = self.check_pkts_received() self.verify(len(out_4) == 1, "Wrong number of pkts received") self.verify(("0", mul_mac_0) in out_4, "pkt1 can't be received by port 0") @@ -727,7 +727,7 @@ class TestICEVfSupportMulticastAdress(TestCase): self.pmd_output.execute_cmd("mcast_addr remove 0 %s" % mul_mac_0) self.check_ports_multicast_address_number(0, 0) # send 4 packets - p.send_pkt(self.tester, tx_port=self.tester_itf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf) out_5 = self.check_pkts_received() self.verify(len(out_5) == 0, "pkt1-4 can be received by any port") @@ -735,16 +735,16 @@ class TestICEVfSupportMulticastAdress(TestCase): """ Run after each test case. """ - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_iavf() if self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.pf_interface, self.flag, self.default_stats), "# ", diff --git a/tests/TestSuite_ieee1588.py b/tests/TestSuite_ieee1588.py index 300d85d4..519f394c 100644 --- a/tests/TestSuite_ieee1588.py +++ b/tests/TestSuite_ieee1588.py @@ -11,8 +11,8 @@ import re import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase DEV_TX_OFFLOAD_MULTI_SEGS = "0x00008000" @@ -24,16 +24,16 @@ class TestIeee1588(TestCase): Run at the start of each test suite. IEEE1588 Prerequisites """ - dutPorts = self.dut.get_ports() - self.verify(len(dutPorts) > 0, "No ports found for " + self.nic) + sutPorts = self.sut_node.get_ports() + self.verify(len(sutPorts) > 0, "No ports found for " + self.nic) # recompile the package with extra options of support IEEE1588. - self.dut.skip_setup = False - self.dut.build_install_dpdk( + self.sut_node.skip_setup = False + self.sut_node.build_install_dpdk( self.target, extra_options="-Dc_args=-DRTE_LIBRTE_IEEE1588" ) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) # For IEEE1588, the full-feature tx path needs to be enabled. # Enabling any tx offload will force DPDK utilize full tx path. # Enabling multiple segment offload is more reasonable for user cases. @@ -51,32 +51,32 @@ class TestIeee1588(TestCase): """ IEEE1588 Enable test case. 
""" - self.dut.send_expect("set fwd ieee1588", "testpmd> ", 10) + self.sut_node.send_expect("set fwd ieee1588", "testpmd> ", 10) if self.nic in ["cavium_a063", "cavium_a064"]: - self.dut.send_expect("set port 0 ptype_mask 0xf", "testpmd> ", 10) + self.sut_node.send_expect("set port 0 ptype_mask 0xf", "testpmd> ", 10) # Waiting for 'testpmd> ' Fails due to log messages, "Received non PTP # packet", in the output - self.dut.send_expect("start", ">", 10) + self.sut_node.send_expect("start", ">", 10) # Allow the output from the "start" command to finish before looking # for a regexp in expect time.sleep(1) # use the first port on that self.nic - dutPorts = self.dut.get_ports() - mac = self.dut.get_mac_address(dutPorts[0]) - port = self.tester.get_local_port(dutPorts[0]) - itf = self.tester.get_interface(port) + sutPorts = self.sut_node.get_ports() + mac = self.sut_node.get_mac_address(sutPorts[0]) + port = self.tg_node.get_local_port(sutPorts[0]) + itf = self.tg_node.get_interface(port) - self.send_session = self.tester.create_session("send_session") + self.send_session = self.tg_node.create_session("send_session") self.send_session.send_expect( "tcpdump -i %s -e ether src %s" % (itf, mac), "tcpdump", 20 ) - setattr(self.send_session, "tmp_file", self.tester.tmp_file) - setattr(self.send_session, "tmp_file", self.tester.get_session_output) - pkt = Packet(pkt_type="TIMESYNC") - pkt.config_layer("ether", {"dst": mac}) - pkt.send_pkt(self.tester, tx_port=itf) + setattr(self.send_session, "tmp_file", self.tg_node.tmp_file) + setattr(self.send_session, "tmp_file", self.tg_node.get_session_output) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="TIMESYNC") + scapy_pkt_builder.config_layer("ether", {"dst": mac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=itf) time.sleep(1) out = self.send_session.get_session_before(timeout=20) @@ -86,8 +86,8 @@ class TestIeee1588(TestCase): self.verify("0x88f7" in out, "Ether type is not PTP") time.sleep(1) - out = 
self.dut.get_session_output() - self.dut.send_expect("stop", "testpmd> ") + out = self.sut_node.get_session_output() + self.sut_node.send_expect("stop", "testpmd> ") text = utils.regexp(out, "(.*) by hardware") self.verify("IEEE1588 PTP V2 SYNC" in text, "Not filtered " + text) @@ -110,29 +110,29 @@ class TestIeee1588(TestCase): """ IEEE1588 Disable test case. """ - self.dut.send_expect("stop", "testpmd> ", 10) + self.sut_node.send_expect("stop", "testpmd> ", 10) time.sleep(3) # use the first port on that self.nic - dutPorts = self.dut.get_ports() - mac = self.dut.get_mac_address(dutPorts[0]) - port = self.tester.get_local_port(dutPorts[0]) - itf = self.tester.get_interface(port) - - self.tester.scapy_background() - self.tester.scapy_append('p = sniff(iface="%s", count=2, timeout=1)' % itf) - self.tester.scapy_append("RESULT = p[1].summary()") - - self.tester.scapy_foreground() - self.tester.scapy_append('nutmac="%s"' % mac) - self.tester.scapy_append( + sutPorts = self.sut_node.get_ports() + mac = self.sut_node.get_mac_address(sutPorts[0]) + port = self.tg_node.get_local_port(sutPorts[0]) + itf = self.tg_node.get_interface(port) + + self.tg_node.scapy_background() + self.tg_node.scapy_append('p = sniff(iface="%s", count=2, timeout=1)' % itf) + self.tg_node.scapy_append("RESULT = p[1].summary()") + + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('nutmac="%s"' % mac) + self.tg_node.scapy_append( 'sendp([Ether(dst=nutmac,type=0x88f7)/"\\x00\\x02"], iface="%s")' % itf ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(2) - out = self.tester.scapy_get_result() + out = self.tg_node.scapy_get_result() self.verify("Ether" not in out, "Ether type is not PTP") def tear_down(self): @@ -145,7 +145,7 @@ class TestIeee1588(TestCase): """ Run after each test suite. """ - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) # recompile the package with default options. 
- self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) diff --git a/tests/TestSuite_inline_ipsec.py b/tests/TestSuite_inline_ipsec.py index 474e616a..5712c268 100644 --- a/tests/TestSuite_inline_ipsec.py +++ b/tests/TestSuite_inline_ipsec.py @@ -39,40 +39,40 @@ class TestInlineIpsec(TestCase): self.drivername in ["vfio-pci"], "%s drivername not support" % self.drivername, ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - cores = self.dut.get_core_list("1S/4C/1T") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + cores = self.sut_node.get_core_list("1S/4C/1T") self.coremask = utils.create_mask(cores) # get test port info - self.rxport = self.tester.get_local_port(1) - self.txport = self.tester.get_local_port(0) - self.rxItf = self.tester.get_interface(self.rxport) - self.txItf = self.tester.get_interface(self.txport) + self.rxport = self.tg_node.get_local_port(1) + self.txport = self.tg_node.get_local_port(0) + self.rxItf = self.tg_node.get_interface(self.rxport) + self.txItf = self.tg_node.get_interface(self.txport) - self.rx_src = self.tester.get_mac(self.rxport) - self.tx_dst = self.dut.get_mac_address(self.dut_ports[0]) + self.rx_src = self.tg_node.get_mac(self.rxport) + self.tx_dst = self.sut_node.get_mac_address(self.sut_ports[0]) - # get dut port pci - self.portpci_0 = self.dut.get_port_pci(self.dut_ports[0]) - self.portpci_1 = self.dut.get_port_pci(self.dut_ports[1]) + # get SUT port pci + self.portpci_0 = self.sut_node.get_port_pci(self.sut_ports[0]) + self.portpci_1 = self.sut_node.get_port_pci(self.sut_ports[1]) - # enable tester mtu - self.rxnetobj = self.tester.ports_info[self.rxport]["port"] + # enable TG mtu + self.rxnetobj = self.tg_node.ports_info[self.rxport]["port"] self.rxnetobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU + 100) - self.txnetobj = 
self.tester.ports_info[self.txport]["port"] + self.txnetobj = self.tg_node.ports_info[self.txport]["port"] self.txnetobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU + 100) - self.path = self.dut.apps_name["ipsec-secgw"] + self.path = self.sut_node.apps_name["ipsec-secgw"] # add print code in IPSEC app sedcmd = r"""sed -i -e 's/if (nb_rx > 0)/if (nb_rx > 0) {/g' -e '/\/\* dequeue and process completed crypto-ops \*\//i\\t\t\t}' -e '/process_pkts(qconf, pkts, nb_rx, portid);/i\\t\t\t\tprintf("[debug]receive %llu packet in rxqueueid=%llu\\n",(unsigned long long)nb_rx, (unsigned long long)queueid);' examples/ipsec-secgw/ipsec-secgw.c""" - self.dut.send_expect(sedcmd, "#", 60) + self.sut_node.send_expect(sedcmd, "#", 60) # build sample app - out = self.dut.build_dpdk_apps("./examples/ipsec-secgw") + out = self.sut_node.build_dpdk_apps("./examples/ipsec-secgw") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") - self.eal_para = self.dut.create_eal_parameters(cores=[20, 21]) + self.eal_para = self.sut_node.create_eal_parameters(cores=[20, 21]) self.cfg_prepare() @@ -141,12 +141,12 @@ class TestInlineIpsec(TestCase): def set_cfg(self, filename, cfg): """ - open file and write cfg, scp it to dut base directory + open file and write cfg, scp it to SUT base directory """ for i in cfg: with open(filename, "w") as f: f.write(cfg) - self.dut.session.copy_file_to(filename, self.dut.base_dir) + self.sut_node.session.copy_file_to(filename, self.sut_node.base_dir) def send_encryption_package( self, @@ -190,9 +190,9 @@ class TestInlineIpsec(TestCase): eth_e = Ether() / e eth_e.src = self.rx_src eth_e.dst = self.tx_dst - session_send = self.tester.create_session(name="send_encryption_package") + session_send = self.tg_node.create_session(name="send_encryption_package") sendp(eth_e, iface=intf, count=count) - self.tester.destroy_session(session_send) + self.tg_node.destroy_session(session_send) return payload, p.src, 
p.dst def Ipsec_Encryption( @@ -228,9 +228,9 @@ class TestInlineIpsec(TestCase): % (self.eal_para, config, file_name) ) - self.dut.send_expect(cmd, "IPSEC", 60) + self.sut_node.send_expect(cmd, "IPSEC", 60) - session_receive = self.tester.create_session(name="receive_encryption_package") + session_receive = self.tg_node.create_session(name="receive_encryption_package") sa_gcm = ( r"sa_gcm=SecurityAssociation(ESP,spi=%s,crypt_algo='AES-GCM',crypt_key=b'\x2b\x7e\x15\x16\x28\xae\xd2\xa6\xab\xf7\x15\x88\x09\xcf\x4f\x3d\xde\xad\xbe\xef',auth_algo='NULL',auth_key=None,tunnel_header=IP(src='172.16.1.5',dst='172.16.2.5'))" % receive_spi @@ -250,7 +250,7 @@ class TestInlineIpsec(TestCase): session_receive.send_expect("pkts", "", 30) out = session_receive.send_expect("pkts[0]['IP'] ", ">>>", 10) else: - session_receive2 = self.tester.create_session( + session_receive2 = self.tg_node.create_session( name="receive_encryption_package2" ) session_receive2.send_expect("tcpdump -Xvvvi %s -c 1" % rxItf, "", 10) @@ -263,7 +263,7 @@ class TestInlineIpsec(TestCase): p = re.compile(": ESP\(spi=0x\w+,seq=0x\w+\),") res = p.search(rev) self.verify(res, "encrypt failed, tcpdump get %s" % rev) - self.tester.destroy_session(session_receive2) + self.tg_node.destroy_session(session_receive2) session_receive.send_expect("pkts", "", 30) session_receive.send_expect(sa_gcm, ">>>", 10) time.sleep(2) @@ -286,7 +286,7 @@ class TestInlineIpsec(TestCase): else: self.verify(send_package[0] not in out, "The function is not in effect") session_receive.send_expect("quit()", "#", 10) - self.tester.destroy_session(session_receive) + self.tg_node.destroy_session(session_receive) def test_Ipsec_Encryption(self): """ @@ -297,7 +297,7 @@ class TestInlineIpsec(TestCase): self.Ipsec_Encryption( config, "/root/dpdk/enc.cfg", self.txItf, self.rxItf, paysize ) - self.dut.send_expect("^C", "#", 5) + self.sut_node.send_expect("^C", "#", 5) def test_Ipsec_Encryption_Jumboframe(self): """ @@ -313,7 +313,7 @@ class 
TestInlineIpsec(TestCase): paysize, ETHER_JUMBO_FRAME_MTU, ) - self.dut.send_expect("^C", "#", 5) + self.sut_node.send_expect("^C", "#", 5) def test_Ipsec_Encryption_Rss(self): """ @@ -329,10 +329,10 @@ class TestInlineIpsec(TestCase): receive_spi=1002, inner_dst="192.168.102.10", ) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() verifycode = "receive 1 packet in rxqueueid=1" self.verify(verifycode in out, "rxqueueid error") - self.dut.send_expect("^C", "#", 5) + self.sut_node.send_expect("^C", "#", 5) def test_IPSec_Decryption(self): """ @@ -349,7 +349,7 @@ class TestInlineIpsec(TestCase): do_encrypt=True, count=2, ) - self.dut.send_expect("^C", "#", 5) + self.sut_node.send_expect("^C", "#", 5) def test_IPSec_Decryption_Jumboframe(self): """ @@ -367,7 +367,7 @@ class TestInlineIpsec(TestCase): do_encrypt=True, count=2, ) - self.dut.send_expect("^C", "#", 5) + self.sut_node.send_expect("^C", "#", 5) def test_Ipsec_Decryption_Rss(self): """ @@ -386,10 +386,10 @@ class TestInlineIpsec(TestCase): sa_src="172.16.21.25", sa_dst="172.16.22.25", ) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() verifycode = "receive 1 packet in rxqueueid=1" self.verify(verifycode in out, "rxqueueid error") - self.dut.send_expect("^C", "#", 5) + self.sut_node.send_expect("^C", "#", 5) def test_Ipsec_Decryption_wrongkey(self): """ @@ -407,11 +407,11 @@ class TestInlineIpsec(TestCase): verify=False, count=2, ) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() verifycode = "IPSEC_ESP: esp_inbound_post\(\) failed crypto op" l = re.findall(verifycode, out) self.verify(len(l) == 2, "Ipsec Decryption wrongkey failed") - self.dut.send_expect("^C", "#", 5) + self.sut_node.send_expect("^C", "#", 5) def test_Ipsec_Encryption_Decryption(self): """ @@ -422,8 +422,8 @@ class TestInlineIpsec(TestCase): + " %s --vdev 'crypto_null' --log-level 8 --socket-mem 1024,1 -- -p 0xf -P -u 0x2 --config='%s' -f %s" % 
(self.eal_para, "(0,0,21),(1,0,21)", "/root/dpdk/enc_dec.cfg") ) - self.dut.send_expect(cmd, "IPSEC", 60) - session_receive = self.tester.create_session(name="receive_encryption_package") + self.sut_node.send_expect(cmd, "IPSEC", 60) + session_receive = self.tg_node.create_session(name="receive_encryption_package") sa_gcm = r"sa_gcm=SecurityAssociation(ESP, spi=1005,crypt_algo='AES-GCM',crypt_key=b'\x2b\x7e\x15\x16\x28\xae\xd2\xa6\xab\xf7\x15\x88\x09\xcf\x4f\x3d\xde\xad\xbe\xef',auth_algo='NULL', auth_key=None,tunnel_header=IP(src='172.16.1.5', dst='172.16.2.5'))" session_receive.send_expect("scapy", ">>>", 60) @@ -431,7 +431,7 @@ class TestInlineIpsec(TestCase): session_receive.send_expect( "pkts=sniff(iface='%s',count=3,timeout=30)" % self.rxItf, "", 60 ) - session_receive2 = self.tester.create_session( + session_receive2 = self.tg_node.create_session( name="receive_encryption_package2" ) @@ -465,7 +465,7 @@ class TestInlineIpsec(TestCase): eth_e2 = Ether() / e2 eth_e2.src = self.rx_src eth_e2.dst = self.tx_dst - session_receive3 = self.tester.create_session( + session_receive3 = self.tg_node.create_session( "check_forward_encryption_package" ) session_receive3.send_expect("tcpdump -Xvvvi %s -c 1" % self.rxItf, "", 30) @@ -487,16 +487,16 @@ class TestInlineIpsec(TestCase): self.verify( payload in out, "The package is not received. Please check the package" ) - self.tester.destroy_session(session_receive) - self.tester.destroy_session(session_receive2) - self.tester.destroy_session(session_receive3) + self.tg_node.destroy_session(session_receive) + self.tg_node.destroy_session(session_receive2) + self.tg_node.destroy_session(session_receive3) def tear_down(self): """ Run after each test case. 
""" - self.tester.send_expect("killall tcpdump", "#", 5) - self.dut.kill_all() + self.tg_node.send_expect("killall tcpdump", "#", 5) + self.sut_node.kill_all() time.sleep(2) def tear_down_all(self): diff --git a/tests/TestSuite_interrupt_pmd.py b/tests/TestSuite_interrupt_pmd.py index 2b77e052..eecc68ad 100644 --- a/tests/TestSuite_interrupt_pmd.py +++ b/tests/TestSuite_interrupt_pmd.py @@ -20,13 +20,13 @@ class TestInterruptPmd(TestCase): Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - cores = self.dut.get_core_list("1S/4C/1T") - self.eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + cores = self.sut_node.get_core_list("1S/4C/1T") + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/4C/1T") self.coremask = utils.create_mask(cores) - self.path = self.dut.apps_name["l3fwd-power"] + self.path = self.sut_node.apps_name["l3fwd-power"] self.trafficFlow = { "Flow1": [[0, 0, 1], [1, 0, 2]], @@ -50,22 +50,22 @@ class TestInterruptPmd(TestCase): ], } # build sample app - out = self.dut.build_dpdk_apps("./examples/l3fwd-power") + out = self.sut_node.build_dpdk_apps("./examples/l3fwd-power") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") self.default_driver = self.get_nic_driver() test_driver = "vfio-pci" if test_driver != self.default_driver: - self.dut.send_expect("modprobe %s" % test_driver, "#") + self.sut_node.send_expect("modprobe %s" % test_driver, "#") self.set_nic_driver(test_driver) def get_nic_driver(self, port_id=0): - port = self.dut.ports_info[port_id]["port"] + port = self.sut_node.ports_info[port_id]["port"] return port.get_nic_driver() def set_nic_driver(self, set_driver="vfio-pci"): - for i in self.dut_ports: - port = self.dut.ports_info[i]["port"] + 
for i in self.sut_ports: + port = self.sut_node.ports_info[i]["port"] driver = port.get_nic_driver() if driver != set_driver: port.bind_driver(driver=set_driver) @@ -81,24 +81,24 @@ class TestInterruptPmd(TestCase): self.path, self.eal_para, ) - self.dut.send_expect(cmd, "L3FWD_POWER", 60) + self.sut_node.send_expect(cmd, "L3FWD_POWER", 60) portQueueLcore = self.trafficFlow["Flow1"] self.verifier_result(2, 2, portQueueLcore) - self.dut.kill_all() + self.sut_node.kill_all() cores = list(range(6)) - eal_para = self.dut.create_eal_parameters(cores=cores) + eal_para = self.sut_node.create_eal_parameters(cores=cores) cmd = ( "%s %s -- -p 0x3 -P --config='(0,0,0),(0,1,1),(0,2,2),(0,3,3),(0,4,4)' " % (self.path, eal_para) ) - self.dut.send_expect(cmd, "L3FWD_POWER", 120) + self.sut_node.send_expect(cmd, "L3FWD_POWER", 120) portQueueLcore = self.trafficFlow["Flow2"] self.verifier_result(20, 1, portQueueLcore) - self.dut.kill_all() + self.sut_node.kill_all() cores = list(range(24)) - eal_para = self.dut.create_eal_parameters(cores=cores) + eal_para = self.sut_node.create_eal_parameters(cores=cores) cmd = ( "%s %s -- -p 0x3 -P --config='(0,0,0),(0,1,1),(0,2,2),(0,3,3),\ (0,4,4),(0,5,5),(0,6,6),(0,7,7),(1,0,8),(1,1,9),(1,2,10),(1,3,11),\ @@ -106,13 +106,13 @@ class TestInterruptPmd(TestCase): % (self.path, eal_para) ) - self.dut.send_expect(cmd, "L3FWD_POWER", 60) + self.sut_node.send_expect(cmd, "L3FWD_POWER", 60) portQueueLcore = self.trafficFlow["Flow3"] self.verifier_result(40, 2, portQueueLcore) def verifier_result(self, num, portnum, portQueueLcore): self.scapy_send_packet(num, portnum) - result = self.dut.get_session_output(timeout=5) + result = self.sut_node.get_session_output(timeout=5) for i in range(len(portQueueLcore)): lcorePort = portQueueLcore[i] self.verify( @@ -131,20 +131,20 @@ class TestInterruptPmd(TestCase): """ Send a packet to port """ - for i in range(len(self.dut_ports[:portnum])): - txport = self.tester.get_local_port(self.dut_ports[i]) - mac = 
self.dut.get_mac_address(self.dut_ports[i]) - txItf = self.tester.get_interface(txport) + for i in range(len(self.sut_ports[:portnum])): + txport = self.tg_node.get_local_port(self.sut_ports[i]) + mac = self.sut_node.get_mac_address(self.sut_ports[i]) + txItf = self.tg_node.get_interface(txport) self.verify( - self.tester.is_interface_up(intf=txItf), - "Tester's %s should be up".format(txItf), + self.tg_node.is_interface_up(intf=txItf), + "TG's %s should be up".format(txItf), ) for j in range(num): - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether()/IP(dst="198.0.0.%d")/UDP()/Raw(\'X\'*18)], iface="%s")' % (j, txItf) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def tear_down(self): """ @@ -156,5 +156,5 @@ class TestInterruptPmd(TestCase): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() self.set_nic_driver(self.default_driver) diff --git a/tests/TestSuite_ip_pipeline.py b/tests/TestSuite_ip_pipeline.py index ab1637b5..84b70187 100644 --- a/tests/TestSuite_ip_pipeline.py +++ b/tests/TestSuite_ip_pipeline.py @@ -21,15 +21,14 @@ from scapy.sendrecv import sendp, sniff from scapy.utils import hexstr, rdpcap, wrpcap import framework.utils as utils -from framework.crb import Crb -from framework.dut import Dut from framework.exception import VerifyFailure -from framework.packet import Packet +from framework.node import Node from framework.pmd_output import PmdOutput -from framework.project_dpdk import DPDKdut +from framework.project_dpdk import DPDKSut from framework.settings import DRIVERS, HEADER_SIZE +from framework.sut_node import SutNode from framework.test_case import TestCase -from framework.virt_dut import VirtDut +from framework.virt_sut import VirtSut class TestIPPipeline(TestCase): @@ -39,7 +38,7 @@ class TestIPPipeline(TestCase): """ param = "" direct_param = r"(\s+)\[ (\S+) in\|out\|inout \]" - out = self.tester.send_expect("tcpdump -h", "# ") + out = self.tg_node.send_expect("tcpdump 
-h", "# ") for line in out.split("\n"): m = re.match(direct_param, line) if m: @@ -59,20 +58,20 @@ class TestIPPipeline(TestCase): Starts tcpdump in the background to sniff packets that received by interface. """ command = "rm -f /tmp/tcpdump_{0}.pcap".format(interface) - self.tester.send_expect(command, "#") + self.tg_node.send_expect(command, "#") command = "tcpdump -n -e {0} -w /tmp/tcpdump_{1}.pcap -i {1} {2} 2>/tmp/tcpdump_{1}.out &".format( self.param_flow_dir, interface, filters ) - self.tester.send_expect(command, "# ") + self.tg_node.send_expect(command, "# ") def tcpdump_stop_sniff(self): """ Stops the tcpdump process running in the background. """ - self.tester.send_expect("killall tcpdump", "# ") + self.tg_node.send_expect("killall tcpdump", "# ") # For the [pid]+ Done tcpdump... message after killing the process sleep(1) - self.tester.send_expect('echo "Cleaning buffer"', "# ") + self.tg_node.send_expect('echo "Cleaning buffer"', "# ") sleep(1) def write_pcap_file(self, pcap_file, pkts): @@ -95,21 +94,21 @@ class TestIPPipeline(TestCase): Sent pkts that read from the pcap_file. Return the sniff pkts. 
""" - tx_port = self.tester.get_local_port(self.dut_ports[from_port]) - rx_port = self.tester.get_local_port(self.dut_ports[to_port]) + tx_port = self.tg_node.get_local_port(self.sut_ports[from_port]) + rx_port = self.tg_node.get_local_port(self.sut_ports[to_port]) - tx_interface = self.tester.get_interface(tx_port) - rx_interface = self.tester.get_interface(rx_port) + tx_interface = self.tg_node.get_interface(tx_port) + rx_interface = self.tg_node.get_interface(rx_port) self.tcpdump_start_sniff(rx_interface, filters) # Prepare the pkts to be sent - self.tester.scapy_foreground() - self.tester.scapy_append('pkt = rdpcap("%s")' % (pcap_file)) - self.tester.scapy_append( + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('pkt = rdpcap("%s")' % (pcap_file)) + self.tg_node.scapy_append( 'sendp(pkt, iface="%s", count=%d)' % (tx_interface, count) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() self.tcpdump_stop_sniff() @@ -120,48 +119,48 @@ class TestIPPipeline(TestCase): This is to set up vf environment. The pf is bound to dpdk driver. 
""" - self.dut.send_expect("modprobe vfio-pci", "# ") + self.sut_node.send_expect("modprobe vfio-pci", "# ") if driver == "default": - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() # one PF generate one VF for port_num in range(port_nums): - self.dut.generate_sriov_vfs_by_port(self.dut_ports[port_num], 1, driver) + self.sut_node.generate_sriov_vfs_by_port(self.sut_ports[port_num], 1, driver) self.sriov_vfs_port.append( - self.dut.ports_info[self.dut_ports[port_num]]["vfs_port"] + self.sut_node.ports_info[self.sut_ports[port_num]]["vfs_port"] ) if driver == "default": - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf0_interface, self.vf0_mac), "# ", 3, ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf1_interface, self.vf1_mac), "# ", 3, ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf2_interface, self.vf2_mac), "# ", 3, ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf3_interface, self.vf3_mac), "# ", 3, ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % self.pf0_interface, "# ", 3 ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % self.pf1_interface, "# ", 3 ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % self.pf2_interface, "# ", 3 ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % self.pf3_interface, "# ", 3 ) @@ -181,53 +180,53 @@ class TestIPPipeline(TestCase): self.session_secondary.send_expect(cmd, "# ", 20) time.sleep(5) if driver == self.drivername: - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(5) for port_num in range(port_nums): - 
self.dut.destroy_sriov_vfs_by_port(self.dut_ports[port_num]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[port_num]) def set_up_all(self): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports() + self.sut_ports = self.sut_node.get_ports() self.port_nums = 4 self.verify( - len(self.dut_ports) >= self.port_nums, + len(self.sut_ports) >= self.port_nums, "Insufficient ports for speed testing", ) - self.dut_p0_pci = self.dut.get_port_pci(self.dut_ports[0]) - self.dut_p1_pci = self.dut.get_port_pci(self.dut_ports[1]) - self.dut_p2_pci = self.dut.get_port_pci(self.dut_ports[2]) - self.dut_p3_pci = self.dut.get_port_pci(self.dut_ports[3]) + self.sut_p0_pci = self.sut_node.get_port_pci(self.sut_ports[0]) + self.sut_p1_pci = self.sut_node.get_port_pci(self.sut_ports[1]) + self.sut_p2_pci = self.sut_node.get_port_pci(self.sut_ports[2]) + self.sut_p3_pci = self.sut_node.get_port_pci(self.sut_ports[3]) - self.dut_p0_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.dut_p1_mac = self.dut.get_mac_address(self.dut_ports[1]) - self.dut_p2_mac = self.dut.get_mac_address(self.dut_ports[2]) - self.dut_p3_mac = self.dut.get_mac_address(self.dut_ports[3]) + self.sut_p0_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.sut_p1_mac = self.sut_node.get_mac_address(self.sut_ports[1]) + self.sut_p2_mac = self.sut_node.get_mac_address(self.sut_ports[2]) + self.sut_p3_mac = self.sut_node.get_mac_address(self.sut_ports[3]) - self.pf0_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf1_interface = self.dut.ports_info[self.dut_ports[1]]["intf"] - self.pf2_interface = self.dut.ports_info[self.dut_ports[2]]["intf"] - self.pf3_interface = self.dut.ports_info[self.dut_ports[3]]["intf"] + self.pf0_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf1_interface = self.sut_node.ports_info[self.sut_ports[1]]["intf"] + self.pf2_interface = self.sut_node.ports_info[self.sut_ports[2]]["intf"] + 
self.pf3_interface = self.sut_node.ports_info[self.sut_ports[3]]["intf"] self.vf0_mac = "00:11:22:33:44:55" self.vf1_mac = "00:11:22:33:44:56" self.vf2_mac = "00:11:22:33:44:57" self.vf3_mac = "00:11:22:33:44:58" - ports = [self.dut_p0_pci, self.dut_p1_pci, self.dut_p2_pci, self.dut_p3_pci] - self.eal_para = self.dut.create_eal_parameters( + ports = [self.sut_p0_pci, self.sut_p1_pci, self.sut_p2_pci, self.sut_p3_pci] + self.eal_para = self.sut_node.create_eal_parameters( cores=list(range(2)), ports=ports ) self.sriov_vfs_port = [] - self.session_secondary = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() - out = self.dut.build_dpdk_apps("./examples/ip_pipeline") + out = self.sut_node.build_dpdk_apps("./examples/ip_pipeline") self.verify("Error" not in out, "Compilation error") - self.app_ip_pipline_path = self.dut.apps_name["ip_pipeline"] - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.app_ip_pipline_path = self.sut_node.apps_name["ip_pipeline"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.param_flow_dir = self.get_flow_direction_param_of_tcpdump() def set_up(self): @@ -242,35 +241,35 @@ class TestIPPipeline(TestCase): """ cmd = ( "sed -i -e 's/0000:02:00.0/%s/' ./examples/ip_pipeline/examples/route.cli" - % self.dut_p0_pci + % self.sut_p0_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:02:00.1/%s/' ./examples/ip_pipeline/examples/route.cli" - % self.dut_p1_pci + % self.sut_p1_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.0/%s/' ./examples/ip_pipeline/examples/route.cli" - % self.dut_p2_pci + % self.sut_p2_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.1/%s/' ./examples/ip_pipeline/examples/route.cli" - % self.dut_p3_pci + % self.sut_p3_pci ) - self.dut.send_expect(cmd, "# ", 20) + 
self.sut_node.send_expect(cmd, "# ", 20) SCRIPT_FILE = "./examples/ip_pipeline/examples/route.cli" cmd = "{0} {1} -- -s {2}".format( self.app_ip_pipline_path, self.eal_para, SCRIPT_FILE ) - self.dut.send_expect(cmd, "30:31:32:33:34:35", 60) + self.sut_node.send_expect(cmd, "30:31:32:33:34:35", 60) # rule 0 test pcap_file = "/tmp/route_0.pcap" - pkt = [Ether(dst=self.dut_p0_mac) / IP(dst="100.0.0.1") / Raw(load="X" * 26)] + pkt = [Ether(dst=self.sut_p0_mac) / IP(dst="100.0.0.1") / Raw(load="X" * 26)] self.write_pcap_file(pcap_file, pkt) filters = "dst host 100.0.0.1" sniff_pkts = self.send_and_sniff_pkts(0, 0, pcap_file, filters) @@ -281,7 +280,7 @@ class TestIPPipeline(TestCase): # rule 1 test pcap_file = "/tmp/route_1.pcap" - pkt = [Ether(dst=self.dut_p0_mac) / IP(dst="100.64.0.1") / Raw(load="X" * 26)] + pkt = [Ether(dst=self.sut_p0_mac) / IP(dst="100.64.0.1") / Raw(load="X" * 26)] self.write_pcap_file(pcap_file, pkt) filters = "dst host 100.64.0.1" sniff_pkts = self.send_and_sniff_pkts(0, 1, pcap_file, filters) @@ -292,7 +291,7 @@ class TestIPPipeline(TestCase): # rule 2 test pcap_file = "/tmp/route_2.pcap" - pkt = [Ether(dst=self.dut_p0_mac) / IP(dst="100.128.0.1") / Raw(load="X" * 26)] + pkt = [Ether(dst=self.sut_p0_mac) / IP(dst="100.128.0.1") / Raw(load="X" * 26)] self.write_pcap_file(pcap_file, pkt) filters = "dst host 100.128.0.1" sniff_pkts = self.send_and_sniff_pkts(0, 2, pcap_file, filters) @@ -303,7 +302,7 @@ class TestIPPipeline(TestCase): # rule 3 test pcap_file = "/tmp/route_3.pcap" - pkt = [Ether(dst=self.dut_p0_mac) / IP(dst="100.192.0.1") / Raw(load="X" * 26)] + pkt = [Ether(dst=self.sut_p0_mac) / IP(dst="100.192.0.1") / Raw(load="X" * 26)] self.write_pcap_file(pcap_file, pkt) filters = "dst host 100.192.0.1" sniff_pkts = self.send_and_sniff_pkts(0, 3, pcap_file, filters) @@ -314,7 +313,7 @@ class TestIPPipeline(TestCase): sleep(1) cmd = "^C" - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) def 
test_firewall_pipeline(self): """ @@ -322,36 +321,36 @@ class TestIPPipeline(TestCase): """ cmd = ( "sed -i -e 's/0000:02:00.0/%s/' ./examples/ip_pipeline/examples/firewall.cli" - % self.dut_p0_pci + % self.sut_p0_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:02:00.1/%s/' ./examples/ip_pipeline/examples/firewall.cli" - % self.dut_p1_pci + % self.sut_p1_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.0/%s/' ./examples/ip_pipeline/examples/firewall.cli" - % self.dut_p2_pci + % self.sut_p2_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.1/%s/' ./examples/ip_pipeline/examples/firewall.cli" - % self.dut_p3_pci + % self.sut_p3_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) SCRIPT_FILE = "./examples/ip_pipeline/examples/firewall.cli" cmd = "{0} {1} -- -s {2}".format( self.app_ip_pipline_path, self.eal_para, SCRIPT_FILE ) - self.dut.send_expect(cmd, "fwd port 3", 60) + self.sut_node.send_expect(cmd, "fwd port 3", 60) # rule 0 test pcap_file = "/tmp/fw_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(dst="100.0.0.1") / TCP(sport=100, dport=200) / Raw(load="X" * 6) @@ -367,7 +366,7 @@ class TestIPPipeline(TestCase): # rule 1 test pcap_file = "/tmp/fw_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(dst="100.64.0.1") / TCP(sport=100, dport=200) / Raw(load="X" * 6) @@ -383,7 +382,7 @@ class TestIPPipeline(TestCase): # rule 2 test pcap_file = "/tmp/fw_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(dst="100.128.0.1") / TCP(sport=100, dport=200) / Raw(load="X" * 6) @@ -399,7 +398,7 @@ class TestIPPipeline(TestCase): # rule 3 test pcap_file = "/tmp/fw_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / 
IP(dst="100.192.0.1") / TCP(sport=100, dport=200) / Raw(load="X" * 6) @@ -414,7 +413,7 @@ class TestIPPipeline(TestCase): sleep(1) cmd = "^C" - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) def test_flow_pipeline(self): """ @@ -422,36 +421,36 @@ class TestIPPipeline(TestCase): """ cmd = ( "sed -i -e 's/0000:02:00.0/%s/' ./examples/ip_pipeline/examples/flow.cli" - % self.dut_p0_pci + % self.sut_p0_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:02:00.1/%s/' ./examples/ip_pipeline/examples/flow.cli" - % self.dut_p1_pci + % self.sut_p1_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.0/%s/' ./examples/ip_pipeline/examples/flow.cli" - % self.dut_p2_pci + % self.sut_p2_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.1/%s/' ./examples/ip_pipeline/examples/flow.cli" - % self.dut_p3_pci + % self.sut_p3_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) SCRIPT_FILE = "./examples/ip_pipeline/examples/flow.cli" cmd = "{0} {1} -- -s {2}".format( self.app_ip_pipline_path, self.eal_para, SCRIPT_FILE ) - self.dut.send_expect(cmd, "fwd port 3", 60) + self.sut_node.send_expect(cmd, "fwd port 3", 60) # rule 0 test pcap_file = "/tmp/fl_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.10", dst="200.0.0.10") / TCP(sport=100, dport=200) / Raw(load="X" * 6) @@ -467,7 +466,7 @@ class TestIPPipeline(TestCase): # rule 1 test pcap_file = "/tmp/fl_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.11", dst="200.0.0.11") / TCP(sport=101, dport=201) / Raw(load="X" * 6) @@ -483,7 +482,7 @@ class TestIPPipeline(TestCase): # rule 2 test pcap_file = "/tmp/fl_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / 
IP(src="100.0.0.12", dst="200.0.0.12") / TCP(sport=102, dport=202) / Raw(load="X" * 6) @@ -499,7 +498,7 @@ class TestIPPipeline(TestCase): # rule 3 test pcap_file = "/tmp/fl_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.13", dst="200.0.0.13") / TCP(sport=103, dport=203) / Raw(load="X" * 6) @@ -514,7 +513,7 @@ class TestIPPipeline(TestCase): sleep(1) cmd = "^C" - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) def test_l2fwd_pipeline(self): """ @@ -522,36 +521,36 @@ class TestIPPipeline(TestCase): """ cmd = ( "sed -i -e 's/0000:02:00.0/%s/' ./examples/ip_pipeline/examples/l2fwd.cli" - % self.dut_p0_pci + % self.sut_p0_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:02:00.1/%s/' ./examples/ip_pipeline/examples/l2fwd.cli" - % self.dut_p1_pci + % self.sut_p1_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.0/%s/' ./examples/ip_pipeline/examples/l2fwd.cli" - % self.dut_p2_pci + % self.sut_p2_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.1/%s/' ./examples/ip_pipeline/examples/l2fwd.cli" - % self.dut_p3_pci + % self.sut_p3_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) SCRIPT_FILE = "./examples/ip_pipeline/examples/l2fwd.cli" cmd = "{0} {1} -- -s {2}".format( self.app_ip_pipline_path, self.eal_para, SCRIPT_FILE ) - self.dut.send_expect(cmd, "fwd port 2", 60) + self.sut_node.send_expect(cmd, "fwd port 2", 60) # rule 0 test pcap_file = "/tmp/pt_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.10", dst="200.0.0.10") / TCP(sport=100, dport=200) / Raw(load="X" * 6) @@ -567,7 +566,7 @@ class TestIPPipeline(TestCase): # rule 1 test pcap_file = "/tmp/pt_1.pcap" pkt = [ - Ether(dst=self.dut_p1_mac) + 
Ether(dst=self.sut_p1_mac) / IP(src="100.0.0.11", dst="200.0.0.11") / TCP(sport=101, dport=201) / Raw(load="X" * 6) @@ -583,7 +582,7 @@ class TestIPPipeline(TestCase): # rule 2 test pcap_file = "/tmp/pt_2.pcap" pkt = [ - Ether(dst=self.dut_p2_mac) + Ether(dst=self.sut_p2_mac) / IP(src="100.0.0.12", dst="200.0.0.12") / TCP(sport=102, dport=202) / Raw(load="X" * 6) @@ -599,7 +598,7 @@ class TestIPPipeline(TestCase): # rule 3 test pcap_file = "/tmp/pt_3.pcap" pkt = [ - Ether(dst=self.dut_p3_mac) + Ether(dst=self.sut_p3_mac) / IP(src="100.0.0.13", dst="200.0.0.13") / TCP(sport=103, dport=203) / Raw(load="X" * 6) @@ -614,46 +613,46 @@ class TestIPPipeline(TestCase): sleep(1) cmd = "^C" - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) def test_pfdpdk_vf_l2fwd_pipeline(self): """ VF l2fwd pipeline, PF bound to DPDK driver """ self.setup_env(self.port_nums, driver=self.drivername) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/^link LINK/d' ./examples/ip_pipeline/examples/l2fwd.cli", "# ", 20 ) cmd = ( "sed -i '/mempool MEMPOOL0/a\link LINK3 dev %s rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on' ./examples/ip_pipeline/examples/l2fwd.cli" % self.sriov_vfs_port[3][0].pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i '/mempool MEMPOOL0/a\link LINK2 dev %s rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on' ./examples/ip_pipeline/examples/l2fwd.cli" % self.sriov_vfs_port[2][0].pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i '/mempool MEMPOOL0/a\link LINK1 dev %s rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on' ./examples/ip_pipeline/examples/l2fwd.cli" % self.sriov_vfs_port[1][0].pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i '/mempool MEMPOOL0/a\link LINK0 dev %s rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on' ./examples/ip_pipeline/examples/l2fwd.cli" % self.sriov_vfs_port[0][0].pci 
) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) - DUT_PF_PORTS = [ - self.dut_p0_pci, - self.dut_p1_pci, - self.dut_p2_pci, - self.dut_p3_pci, + SUT_PF_PORTS = [ + self.sut_p0_pci, + self.sut_p1_pci, + self.sut_p2_pci, + self.sut_p3_pci, ] PF_SCRIPT_FILE = "--socket-mem 1024,1024" - DUT_VF_PORTS = [ + SUT_VF_PORTS = [ self.sriov_vfs_port[0][0].pci, self.sriov_vfs_port[1][0].pci, self.sriov_vfs_port[2][0].pci, @@ -661,20 +660,20 @@ class TestIPPipeline(TestCase): ] VF_SCRIPT_FILE = "./examples/ip_pipeline/examples/l2fwd.cli" - pf_eal_para = self.dut.create_eal_parameters( - cores=list(range(4, 8)), prefix="pf", ports=DUT_PF_PORTS + pf_eal_para = self.sut_node.create_eal_parameters( + cores=list(range(4, 8)), prefix="pf", ports=SUT_PF_PORTS ) pf_cmd = "{0} {1} {2} -- -i".format( self.app_testpmd_path, pf_eal_para, PF_SCRIPT_FILE ) - self.dut.send_expect(pf_cmd, "testpmd> ", 60) - self.dut.send_expect("set vf mac addr 0 0 %s" % self.vf0_mac, "testpmd> ", 30) - self.dut.send_expect("set vf mac addr 1 0 %s" % self.vf1_mac, "testpmd> ", 30) - self.dut.send_expect("set vf mac addr 2 0 %s" % self.vf2_mac, "testpmd> ", 30) - self.dut.send_expect("set vf mac addr 3 0 %s" % self.vf3_mac, "testpmd> ", 30) - - vf_eal_para = self.dut.create_eal_parameters( - cores=list(range(2)), ports=DUT_VF_PORTS + self.sut_node.send_expect(pf_cmd, "testpmd> ", 60) + self.sut_node.send_expect("set vf mac addr 0 0 %s" % self.vf0_mac, "testpmd> ", 30) + self.sut_node.send_expect("set vf mac addr 1 0 %s" % self.vf1_mac, "testpmd> ", 30) + self.sut_node.send_expect("set vf mac addr 2 0 %s" % self.vf2_mac, "testpmd> ", 30) + self.sut_node.send_expect("set vf mac addr 3 0 %s" % self.vf3_mac, "testpmd> ", 30) + + vf_eal_para = self.sut_node.create_eal_parameters( + cores=list(range(2)), ports=SUT_VF_PORTS ) vf_cmd = "{0} {1} -- -s {2}".format( self.app_ip_pipline_path, vf_eal_para, VF_SCRIPT_FILE @@ -753,31 +752,31 @@ class TestIPPipeline(TestCase): VF l2fwd 
pipeline, PF bound to kernel driver """ self.setup_env(self.port_nums, driver="default") - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/^link LINK/d' ./examples/ip_pipeline/examples/l2fwd.cli", "# ", 20 ) cmd = ( "sed -i '/mempool MEMPOOL0/a\link LINK3 dev %s rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on' ./examples/ip_pipeline/examples/l2fwd.cli" % self.sriov_vfs_port[3][0].pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i '/mempool MEMPOOL0/a\link LINK2 dev %s rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on' ./examples/ip_pipeline/examples/l2fwd.cli" % self.sriov_vfs_port[2][0].pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i '/mempool MEMPOOL0/a\link LINK1 dev %s rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on' ./examples/ip_pipeline/examples/l2fwd.cli" % self.sriov_vfs_port[1][0].pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i '/mempool MEMPOOL0/a\link LINK0 dev %s rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on' ./examples/ip_pipeline/examples/l2fwd.cli" % self.sriov_vfs_port[0][0].pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) - DUT_VF_PORTS = [ + SUT_VF_PORTS = [ self.sriov_vfs_port[0][0].pci, self.sriov_vfs_port[1][0].pci, self.sriov_vfs_port[2][0].pci, @@ -785,8 +784,8 @@ class TestIPPipeline(TestCase): ] VF_SCRIPT_FILE = "./examples/ip_pipeline/examples/l2fwd.cli" - vf_eal_para = self.dut.create_eal_parameters( - cores=list(range(2)), ports=DUT_VF_PORTS + vf_eal_para = self.sut_node.create_eal_parameters( + cores=list(range(2)), ports=SUT_VF_PORTS ) vf_cmd = "{0} {1} -- -s {2}".format( self.app_ip_pipline_path, vf_eal_para, VF_SCRIPT_FILE @@ -859,8 +858,8 @@ class TestIPPipeline(TestCase): sleep(1) self.destroy_env(self.port_nums, driver=self.drivername) - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in 
self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver(driver=self.drivername) def test_pipeline_with_tap(self): @@ -869,25 +868,25 @@ class TestIPPipeline(TestCase): """ cmd = ( "sed -i -e 's/0000:02:00.0/%s/' ./examples/ip_pipeline/examples/tap.cli" - % self.dut_p0_pci + % self.sut_p0_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:02:00.1/%s/' ./examples/ip_pipeline/examples/tap.cli" - % self.dut_p1_pci + % self.sut_p1_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) - DUT_PORTS = [self.dut_p0_pci, self.dut_p1_pci] + SUT_PORTS = [self.sut_p0_pci, self.sut_p1_pci] SCRIPT_FILE = "./examples/ip_pipeline/examples/tap.cli" - eal_para = self.dut.create_eal_parameters(cores=list(range(2)), ports=DUT_PORTS) + eal_para = self.sut_node.create_eal_parameters(cores=list(range(2)), ports=SUT_PORTS) cmd = "{0} {1} -- -s {2}".format( self.app_ip_pipline_path, eal_para, SCRIPT_FILE ) - self.dut.send_expect(cmd, "fwd port 3", 60) + self.sut_node.send_expect(cmd, "fwd port 3", 60) - tap_session = self.dut.new_session() + tap_session = self.sut_node.new_session() cmd = "ip link set br1 down; brctl delbr br1" tap_session.send_expect(cmd, "# ", 20) cmd = "brctl addbr br1; brctl addif br1 TAP0; brctl addif br1 TAP1" @@ -897,7 +896,7 @@ class TestIPPipeline(TestCase): # rule 0 test pcap_file = "/tmp/tap_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.10", dst="200.0.0.10") / TCP(sport=100, dport=200) / Raw(load="X" * 6) @@ -913,7 +912,7 @@ class TestIPPipeline(TestCase): # rule 1 test pcap_file = "/tmp/tap_1.pcap" pkt = [ - Ether(dst=self.dut_p1_mac) + Ether(dst=self.sut_p1_mac) / IP(src="100.0.0.11", dst="200.0.0.11") / TCP(sport=101, dport=201) / Raw(load="X" * 6) @@ -928,11 +927,11 @@ class TestIPPipeline(TestCase): sleep(1) cmd = "^C" - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# 
", 20) cmd = "ip link set br1 down; brctl delbr br1" tap_session.send_expect(cmd, "# ", 20) - self.dut.close_session(tap_session) + self.sut_node.close_session(tap_session) def test_rss_pipeline(self): """ @@ -940,38 +939,38 @@ class TestIPPipeline(TestCase): """ cmd = ( "sed -i -e 's/0000:02:00.0/%s/' ./examples/ip_pipeline/examples/rss.cli" - % self.dut_p0_pci + % self.sut_p0_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:02:00.1/%s/' ./examples/ip_pipeline/examples/rss.cli" - % self.dut_p1_pci + % self.sut_p1_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.0/%s/' ./examples/ip_pipeline/examples/rss.cli" - % self.dut_p2_pci + % self.sut_p2_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) cmd = ( "sed -i -e 's/0000:06:00.1/%s/' ./examples/ip_pipeline/examples/rss.cli" - % self.dut_p3_pci + % self.sut_p3_pci ) - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) - DUT_PORTS = [self.dut_p0_pci, self.dut_p1_pci, self.dut_p2_pci, self.dut_p3_pci] + SUT_PORTS = [self.sut_p0_pci, self.sut_p1_pci, self.sut_p2_pci, self.sut_p3_pci] SCRIPT_FILE = "./examples/ip_pipeline/examples/rss.cli" - eal_para = self.dut.create_eal_parameters(cores=list(range(5)), ports=DUT_PORTS) + eal_para = self.sut_node.create_eal_parameters(cores=list(range(5)), ports=SUT_PORTS) cmd = "{0} {1} -- -s {2}".format( self.app_ip_pipline_path, eal_para, SCRIPT_FILE ) - self.dut.send_expect(cmd, "PIPELINE3 enable", 60) + self.sut_node.send_expect(cmd, "PIPELINE3 enable", 60) # rule 0 test pcap_file = "/tmp/rss_0.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.10.1", dst="100.0.20.2") / Raw(load="X" * 6) ] @@ -986,7 +985,7 @@ class TestIPPipeline(TestCase): # rule 1 test pcap_file = "/tmp/rss_1.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + 
Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.0", dst="100.0.0.1") / Raw(load="X" * 6) ] @@ -1001,7 +1000,7 @@ class TestIPPipeline(TestCase): # rule 2 test pcap_file = "/tmp/rss_2.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.10.1", dst="100.0.0.2") / Raw(load="X" * 6) ] @@ -1016,7 +1015,7 @@ class TestIPPipeline(TestCase): # rule 3 test pcap_file = "/tmp/rss_3.pcap" pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="100.0.0.1", dst="100.0.10.2") / Raw(load="X" * 6) ] @@ -1030,7 +1029,7 @@ class TestIPPipeline(TestCase): sleep(1) cmd = "^C" - self.dut.send_expect(cmd, "# ", 20) + self.sut_node.send_expect(cmd, "# ", 20) def tear_down(self): """ @@ -1042,5 +1041,5 @@ class TestIPPipeline(TestCase): """ Run after each test suite. """ - self.dut.close_session(self.session_secondary) - self.dut.kill_all() + self.sut_node.close_session(self.session_secondary) + self.sut_node.kill_all() diff --git a/tests/TestSuite_ipfrag.py b/tests/TestSuite_ipfrag.py index 62170865..1e57f3d9 100644 --- a/tests/TestSuite_ipfrag.py +++ b/tests/TestSuite_ipfrag.py @@ -13,10 +13,10 @@ import string import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream lpm_table_ipv6 = [ "{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, P1}", @@ -46,15 +46,15 @@ class TestIpfrag(TestCase): """ # Based on h/w type, choose how many ports to use - self.ports = self.dut.get_ports() + self.ports = self.sut_node.get_ports() # Verify that enough ports are available self.verify(len(self.ports) >= 2, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.ports[0]) # Verify that enough threads are 
available - cores = self.dut.get_core_list("1S/1C/1T") + cores = self.sut_node.get_core_list("1S/1C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") global P0, P1 @@ -62,19 +62,19 @@ class TestIpfrag(TestCase): P1 = self.ports[1] # make application - out = self.dut.build_dpdk_apps("examples/ip_fragmentation") + out = self.sut_node.build_dpdk_apps("examples/ip_fragmentation") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") - self.eal_para = self.dut.create_eal_parameters( + self.eal_para = self.sut_node.create_eal_parameters( cores="1S/1C/2T", socket=self.ports_socket, ports=self.ports ) portmask = utils.create_mask([P0, P1]) numPortThread = len([P0, P1]) / len(cores) # run ipv4_frag - self.app_ip_fragmentation_path = self.dut.apps_name["ip_fragmentation"] - self.dut.send_expect( + self.app_ip_fragmentation_path = self.sut_node.apps_name["ip_fragmentation"] + self.sut_node.send_expect( "%s %s -- -p %s -q %s" % ( self.app_ip_fragmentation_path, @@ -87,9 +87,9 @@ class TestIpfrag(TestCase): ) time.sleep(2) - self.txItf = self.tester.get_interface(self.tester.get_local_port(P0)) - self.rxItf = self.tester.get_interface(self.tester.get_local_port(P1)) - self.dmac = self.dut.get_mac_address(P0) + self.txItf = self.tg_node.get_interface(self.tg_node.get_local_port(P0)) + self.rxItf = self.tg_node.get_interface(self.tg_node.get_local_port(P1)) + self.dmac = self.sut_node.get_mac_address(P0) # get dts output path if self.logger.log_path.startswith(os.sep): @@ -98,7 +98,7 @@ class TestIpfrag(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def functional_check_ipv4(self, pkt_sizes, burst=1, flag=None): """ @@ -119,19 +119,19 @@ class 
TestIpfrag(TestCase): expPkts = 1 val = 2 - inst = self.tester.tcpdump_sniff_packets(intf=self.rxItf) + inst = self.tg_node.tcpdump_sniff_packets(intf=self.rxItf) # send packet for times in range(burst): pkt_size = pkt_sizes[pkt_sizes.index(size) + times] - pkt = Packet(pkt_type="UDP", pkt_len=pkt_size) - pkt.config_layer("ether", {"dst": "%s" % self.dmac}) - pkt.config_layer( + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=pkt_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % self.dmac}) + scapy_pkt_builder.config_layer( "ipv4", {"dst": "100.20.0.1", "src": "1.2.3.4", "flags": val} ) - pkt.send_pkt(self.tester, tx_port=self.txItf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.txItf) # verify normal packet just by number, verify fragment packet by all elements - pkts = self.tester.load_tcpdump_sniff_packets(inst) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst) self.verify( len(pkts) == expPkts, "in functional_check_ipv4(): failed on forward packet size " @@ -180,23 +180,23 @@ class TestIpfrag(TestCase): expPkts = 1 val = 2 - inst = self.tester.tcpdump_sniff_packets(intf=self.rxItf) + inst = self.tg_node.tcpdump_sniff_packets(intf=self.rxItf) # send packet for times in range(burst): pkt_size = pkt_sizes[pkt_sizes.index(size) + times] - pkt = Packet(pkt_type="IPv6_UDP", pkt_len=pkt_size) - pkt.config_layer("ether", {"dst": "%s" % self.dmac}) - pkt.config_layer( + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IPv6_UDP", pkt_len=pkt_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % self.dmac}) + scapy_pkt_builder.config_layer( "ipv6", { "dst": "201:101:101:101:101:101:101:101", "src": "ee80:ee80:ee80:ee80:ee80:ee80:ee80:ee80", }, ) - pkt.send_pkt(self.tester, tx_port=self.txItf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.txItf) # verify normal packet just by number, verify fragment packet by all elements - pkts = self.tester.load_tcpdump_sniff_packets(inst) + pkts = 
self.tg_node.load_tcpdump_sniff_packets(inst) self.verify( len(pkts) == expPkts, "In functional_check_ipv6(): failed on forward packet size " @@ -233,14 +233,14 @@ class TestIpfrag(TestCase): """ Run before each test case. """ - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu 9200" - % self.tester.get_interface(self.tester.get_local_port(P0)), + % self.tg_node.get_interface(self.tg_node.get_local_port(P0)), "#", ) - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu 9200" - % self.tester.get_interface(self.tester.get_local_port(P1)), + % self.tg_node.get_interface(self.tg_node.get_local_port(P1)), "#", ) @@ -269,7 +269,7 @@ class TestIpfrag(TestCase): """ sizelist = [1519, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000] - cores = self.dut.get_core_list("1S/1C/2T") + cores = self.sut_node.get_core_list("1S/1C/2T") self.functional_check_ipv4(sizelist, 1, "frag") self.functional_check_ipv6(sizelist, 1, "frag") @@ -284,14 +284,14 @@ class TestIpfrag(TestCase): Pct = dict() if int(lcore[0]) == 1: - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( cores=lcore, socket=self.ports_socket, ports=self.ports ) else: - eal_param = self.dut.create_eal_parameters(cores=lcore, ports=self.ports) + eal_param = self.sut_node.create_eal_parameters(cores=lcore, ports=self.ports) portmask = utils.create_mask([P0, P1]) - self.dut.send_expect("^c", "# ", 120) - self.dut.send_expect( + self.sut_node.send_expect("^c", "# ", 120) + self.sut_node.send_expect( "%s %s -- -p %s -q %s" % (self.app_ip_fragmentation_path, eal_param, portmask, num_pthreads), "IP_FRAG:", @@ -299,7 +299,7 @@ class TestIpfrag(TestCase): ) result = [2, lcore, num_pthreads] for size in size_list: - dmac = self.dut.get_mac_address(P0) + dmac = self.sut_node.get_mac_address(P0) flows_p0 = [ 'Ether(dst="%s")/IP(src="1.2.3.4", dst="100.10.0.1", flags=0)/("X"*%d)' % (dmac, size - 38), @@ -312,7 +312,7 @@ class TestIpfrag(TestCase): ] # 
reserved for rx/tx bidirection test - dmac = self.dut.get_mac_address(P1) + dmac = self.sut_node.get_mac_address(P1) flows_p1 = [ 'Ether(dst="%s")/IP(src="1.2.3.4", dst="100.30.0.1", flags=0)/("X"*%d)' % (dmac, size - 38), @@ -328,22 +328,22 @@ class TestIpfrag(TestCase): for i in range(flow_len): pcap0 = os.sep.join([self.output_path, "p0_{}.pcap".format(i)]) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap0, flows_p0[i])) + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap0, flows_p0[i])) pcap1 = os.sep.join([self.output_path, "p1_{}.pcap".format(i)]) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap1, flows_p1[i])) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap1, flows_p1[i])) + self.tg_node.scapy_execute() tgenInput.append( ( - self.tester.get_local_port(P0), - self.tester.get_local_port(P1), + self.tg_node.get_local_port(P0), + self.tg_node.get_local_port(P1), pcap0, ) ) tgenInput.append( ( - self.tester.get_local_port(P1), - self.tester.get_local_port(P0), + self.tg_node.get_local_port(P1), + self.tg_node.get_local_port(P0), pcap1, ) ) @@ -352,12 +352,12 @@ class TestIpfrag(TestCase): # wireSpd = 2 * 10000.0 / ((20 + size) * 8) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) - Bps[str(size)], Pps[str(size)] = self.tester.pktgen.measure_throughput( + Bps[str(size)], Pps[str(size)] = self.tg_node.perf_tg.measure_throughput( stream_ids=streams ) @@ -370,7 +370,7 @@ class TestIpfrag(TestCase): self.result_table_add(result) - self.dut.send_expect("^C", "#") + self.sut_node.send_expect("^C", "#") def test_perf_ipfrag_throughtput(self): """ @@ -397,14 +397,14 @@ class TestIpfrag(TestCase): """ Run after each test case. 
""" - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu 1500" - % self.tester.get_interface(self.tester.get_local_port(P0)), + % self.tg_node.get_interface(self.tg_node.get_local_port(P0)), "#", ) - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu 1500" - % self.tester.get_interface(self.tester.get_local_port(P1)), + % self.tg_node.get_interface(self.tg_node.get_local_port(P1)), "#", ) @@ -412,5 +412,5 @@ class TestIpfrag(TestCase): """ Run after each test suite. """ - self.dut.send_expect("^C", "#") + self.sut_node.send_expect("^C", "#") pass diff --git a/tests/TestSuite_ipgre.py b/tests/TestSuite_ipgre.py index cd50c1c4..6ec2d79f 100644 --- a/tests/TestSuite_ipgre.py +++ b/tests/TestSuite_ipgre.py @@ -5,8 +5,8 @@ """ DPDK Test suite. -Generic Routing Encapsulation (GRE) is a tunneling protocol developed by -Cisco Systems that can encapsulate a wide variety of network layer protocols +Generic Routing Encapsulation (GRE) is a tunneling protocol developed by +Cisco Systems that can encapsulate a wide variety of network layer protocols inside virtual point-to-point links over an Internet Protocol network. Intel® Ethernet 700 Series support GRE packet detecting, checksum computing @@ -25,8 +25,8 @@ from scapy.utils import rdpcap, wrpcap import framework.utils as utils from framework.exception import VerifyFailure -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -36,7 +36,7 @@ class TestIpgre(TestCase): Run at the start of each test suite. 
""" self.printFlag = self._enable_debug - ports = self.dut.get_ports() + ports = self.sut_node.get_ports() self.verify( self.nic in [ @@ -55,16 +55,16 @@ class TestIpgre(TestCase): "Adapter X710-T2L and cavium", ) self.verify(len(ports) >= 1, "Insufficient ports for testing") - valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] + valports = [_ for _ in ports if self.tg_node.get_local_port(_) != -1] # start testpmd - self.dut_port = valports[0] - self.dut_ports = self.dut.get_ports(self.nic) - self.portMask = utils.create_mask([self.dut_ports[0]]) - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.pmdout = PmdOutput(self.dut) - tester_port = self.tester.get_local_port(self.dut_port) - self.tester_iface = self.tester.get_interface(tester_port) - self.tester_iface_mac = self.tester.get_mac(tester_port) + self.sut_port = valports[0] + self.sut_ports = self.sut_node.get_ports(self.nic) + self.portMask = utils.create_mask([self.sut_ports[0]]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.pmdout = PmdOutput(self.sut_node) + tg_port = self.tg_node.get_local_port(self.sut_port) + self.tg_iface = self.tg_node.get_interface(tg_port) + self.tg_iface_mac = self.tg_node.get_mac(tg_port) self.initialize_port_config() def initialize_port_config(self): @@ -88,15 +88,15 @@ class TestIpgre(TestCase): time.sleep(1) for pkt_type in list(pkt_types.keys()): pkt_names = pkt_types[pkt_type] - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) if layer_configs: for layer in list(layer_configs.keys()): - pkt.config_layer(layer, layer_configs[layer]) - inst = self.tester.tcpdump_sniff_packets(self.tester_iface, count=1) - pkt.send_pkt(crb=self.tester, tx_port=self.tester_iface, count=4) - out = self.dut.get_session_output(timeout=2) + scapy_pkt_builder.config_layer(layer, layer_configs[layer]) + inst = self.tg_node.tcpdump_sniff_packets(self.tg_iface, count=1) + 
scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=self.tg_iface, count=4) + out = self.sut_node.get_session_output(timeout=2) time.sleep(1) - pkt = self.tester.load_tcpdump_sniff_packets(inst) + scapy_pkt_builder = self.tg_node.load_tcpdump_sniff_packets(inst) if self.printFlag: # debug output print(out) for pkt_layer_name in pkt_names: @@ -122,16 +122,16 @@ class TestIpgre(TestCase): ("Receive queue=0x%s" % queue) not in out, "Failed to enter the right queue.", ) - return pkt + return scapy_pkt_builder def save_ref_packet(self, pkt_types, layer_configs=None): for pkt_type in list(pkt_types.keys()): pkt_names = pkt_types[pkt_type] - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) if layer_configs: for layer in list(layer_configs.keys()): - pkt.config_layer(layer, layer_configs[layer]) - wrpcap("/tmp/ref_pkt.pcap", pkt.pktgen.pkt) + scapy_pkt_builder.config_layer(layer, layer_configs[layer]) + wrpcap("/tmp/ref_pkt.pcap", scapy_pkt_builder.scapy_pkt_util.pkt) time.sleep(1) def get_chksums(self, pcap=None): @@ -143,7 +143,7 @@ class TestIpgre(TestCase): if isinstance(pcap, str): pkts = rdpcap(pcap) else: - pkts = pcap.pktgen.pkts + pkts = pcap.scapy_pkt_util.pkts for number in range(len(pkts)): if pkts[number].guess_payload_class(pkts[number]).name == "gre": payload = pkts[number][GRE] @@ -220,13 +220,13 @@ class TestIpgre(TestCase): socket=self.ports_socket, ) - self.dut.send_expect("set fwd rxonly", "testpmd>") - self.dut.send_expect("set verbose 1", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd rxonly", "testpmd>") + self.sut_node.send_expect("set verbose 1", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.check_packet_transmission(pkt_types, config_layers) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") def test_GRE_ipv6_packet_detect(self): """ @@ -309,9 +309,9 @@ class TestIpgre(TestCase): socket=self.ports_socket, ) - 
self.dut.send_expect("set fwd rxonly", "testpmd>") - self.dut.send_expect("set verbose 1", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd rxonly", "testpmd>") + self.sut_node.send_expect("set verbose 1", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") # inner ipv4 config_layers = { @@ -339,7 +339,7 @@ class TestIpgre(TestCase): "raw": {"payload": ["78"] * 40}, } self.check_packet_transmission(pkt_types_ipv6_ipv6_SCTP, config_layers) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") def test_GRE_packet_chksum_offload(self): """ @@ -353,19 +353,19 @@ class TestIpgre(TestCase): + " --enable-rx-cksum --port-topology=loop", socket=self.ports_socket, ) - self.dut.send_expect("set verbose 1", "testpmd>") - self.dut.send_expect("set fwd csum", "testpmd>") - self.dut.send_expect("stop", "testpmd>") - self.dut.send_expect("port stop all", "testpmd>") - self.dut.send_expect("csum set ip hw 0", "testpmd>") - self.dut.send_expect("csum set udp hw 0", "testpmd>") + self.sut_node.send_expect("set verbose 1", "testpmd>") + self.sut_node.send_expect("set fwd csum", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") + self.sut_node.send_expect("port stop all", "testpmd>") + self.sut_node.send_expect("csum set ip hw 0", "testpmd>") + self.sut_node.send_expect("csum set udp hw 0", "testpmd>") if self.nic != "cavium_a063": - self.dut.send_expect("csum set sctp hw 0", "testpmd>") - self.dut.send_expect("csum set outer-ip hw 0", "testpmd>") - self.dut.send_expect("csum set tcp hw 0", "testpmd>") - self.dut.send_expect("csum parse-tunnel on 0", "testpmd>") - self.dut.send_expect("port start all", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("csum set sctp hw 0", "testpmd>") + self.sut_node.send_expect("csum set outer-ip hw 0", "testpmd>") + self.sut_node.send_expect("csum set tcp hw 0", "testpmd>") + self.sut_node.send_expect("csum parse-tunnel on 0", 
"testpmd>") + self.sut_node.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") # Send packet with wrong outer IP checksum and check forwarded packet IP checksum is correct pkt_types = {"MAC_IP_GRE_IPv4-TUNNEL_TCP_PKT": ["RTE_MBUF_F_TX_IP_CKSUM"]} @@ -499,14 +499,14 @@ class TestIpgre(TestCase): pkt = self.check_packet_transmission(pkt_types, config_layers) self.compare_checksum(pkt) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") def tear_down(self): """ Run after each test case. Nothing to do. """ - self.dut.kill_all() + self.sut_node.kill_all() pass def tear_down_all(self): @@ -514,5 +514,5 @@ class TestIpgre(TestCase): Run after each test suite. Nothing to do. """ - self.dut.kill_all() + self.sut_node.kill_all() pass diff --git a/tests/TestSuite_ipsec_gw_cryptodev_func.py b/tests/TestSuite_ipsec_gw_cryptodev_func.py index d0deacdb..593f0c7a 100644 --- a/tests/TestSuite_ipsec_gw_cryptodev_func.py +++ b/tests/TestSuite_ipsec_gw_cryptodev_func.py @@ -6,7 +6,7 @@ import binascii import os.path import time -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils import tests.cryptodev_common as cc from framework.settings import CONFIG_ROOT_PATH @@ -17,19 +17,19 @@ class TestIPsecGW(TestCase): def set_up_all(self): self.core_config = "1S/3C/1T" self.number_of_ports = 2 - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) self.verify( - len(self.dut_ports) >= self.number_of_ports, + len(self.sut_ports) >= self.number_of_ports, "Not enough ports for " + self.nic, ) - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.logger.info("core config = " + self.core_config) 
self.logger.info("number of ports = " + str(self.number_of_ports)) - self.logger.info("dut ports = " + str(self.dut_ports)) + self.logger.info("sut ports = " + str(self.sut_ports)) self.logger.info("ports_socket = " + str(self.ports_socket)) # Generally, testbed should has 4 ports NIC, like, # 03:00.0 03:00.1 03:00.2 03:00.3 @@ -39,17 +39,17 @@ class TestIPsecGW(TestCase): # - send test packet from 03:00.3 # - receive packet which forwarded by ipsec-secgw from 03:00.0 # - configure port and peer in dts port.cfg - self.tx_port = self.tester.get_local_port(self.dut_ports[1]) - self.rx_port = self.tester.get_local_port(self.dut_ports[0]) + self.tx_port = self.tg_node.get_local_port(self.sut_ports[1]) + self.rx_port = self.tg_node.get_local_port(self.sut_ports[0]) - self.tx_interface = self.tester.get_interface(self.tx_port) - self.rx_interface = self.tester.get_interface(self.rx_port) + self.tx_interface = self.tg_node.get_interface(self.tx_port) + self.rx_interface = self.tg_node.get_interface(self.rx_port) self.logger.info("tx interface = " + self.tx_interface) self.logger.info("rx interface = " + self.rx_interface) - self._app_path = self.dut.apps_name["ipsec-secgw"] - out = self.dut.build_dpdk_apps("./examples/ipsec-secgw") + self._app_path = self.sut_node.apps_name["ipsec-secgw"] + out = self.sut_node.build_dpdk_apps("./examples/ipsec-secgw") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") @@ -64,13 +64,13 @@ class TestIPsecGW(TestCase): } conf_file = os.path.join(CONFIG_ROOT_PATH, "ipsec_ep0.cfg") - self.dut.session.copy_file_to(conf_file, "/tmp") + self.sut_node.session.copy_file_to(conf_file, "/tmp") def set_up(self): pass def tear_down(self): - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): pass @@ -272,9 +272,9 @@ class TestIPsecGW(TestCase): ipsec_gw_opt_str = self._get_ipsec_gw_opt_str() cmd_str = cc.get_dpdk_app_cmd_str(self._app_path, eal_opt_str, ipsec_gw_opt_str) - 
self.dut.send_expect(cmd_str, "IPSEC:", 30) + self.sut_node.send_expect(cmd_str, "IPSEC:", 30) time.sleep(3) - inst = self.tester.tcpdump_sniff_packets(self.rx_interface) + inst = self.tg_node.tcpdump_sniff_packets(self.rx_interface) PACKET_COUNT = 65 payload = 256 * ["11"] @@ -286,34 +286,34 @@ class TestIPsecGW(TestCase): expected_src_ip = case_cfgs["expected_src_ip"] expected_spi = case_cfgs["expected_spi"] - pkt = packet.Packet() + scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() if len(dst_ip) <= 15: - pkt.assign_layers(["ether", "ipv4", "udp", "raw"]) - pkt.config_layer( + scapy_pkt_builder.assign_layers(["ether", "ipv4", "udp", "raw"]) + scapy_pkt_builder.config_layer( "ether", {"src": "52:00:00:00:00:00", "dst": "52:00:00:00:00:01"} ) - pkt.config_layer("ipv4", {"src": src_ip, "dst": dst_ip}) + scapy_pkt_builder.config_layer("ipv4", {"src": src_ip, "dst": dst_ip}) else: - pkt.assign_layers(["ether", "ipv6", "udp", "raw"]) - pkt.config_layer( + scapy_pkt_builder.assign_layers(["ether", "ipv6", "udp", "raw"]) + scapy_pkt_builder.config_layer( "ether", {"src": "52:00:00:00:00:00", "dst": "52:00:00:00:00:01"} ) - pkt.config_layer("ipv6", {"src": src_ip, "dst": dst_ip}) - pkt.config_layer("udp", {"dst": 0}) - pkt.config_layer("raw", {"payload": payload}) - pkt.send_pkt(crb=self.tester, tx_port=self.tx_interface, count=PACKET_COUNT) + scapy_pkt_builder.config_layer("ipv6", {"src": src_ip, "dst": dst_ip}) + scapy_pkt_builder.config_layer("udp", {"dst": 0}) + scapy_pkt_builder.config_layer("raw", {"payload": payload}) + scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=self.tx_interface, count=PACKET_COUNT) - pkt_rec = self.tester.load_tcpdump_sniff_packets(inst) + pkt_rec = self.tg_node.load_tcpdump_sniff_packets(inst) pcap_filename = "{0}.pcap".format(self.running_case) - self.logger.info("Save pkts to {0}".format(packet.TMP_PATH + pcap_filename)) - pkt_rec.save_pcapfile(self.tester, pcap_filename) + self.logger.info("Save pkts to 
{0}".format(scapy_pkt_builder.TMP_PATH + pcap_filename)) + pkt_rec.save_pcapfile(self.tg_node, pcap_filename) if len(pkt_rec) == 0: self.logger.error("IPsec forwarding failed") result = False for i in range(len(pkt_rec)): - pkt_src_ip = pkt_rec.pktgen.strip_layer3("src", p_index=i) + pkt_src_ip = pkt_rec.scapy_pkt_util.strip_layer3("src", p_index=i) if pkt_src_ip != expected_src_ip: pkt_rec[i].show() self.logger.error( @@ -324,7 +324,7 @@ class TestIPsecGW(TestCase): result = False break - pkt_dst_ip = pkt_rec.pktgen.strip_layer3("dst", p_index=i) + pkt_dst_ip = pkt_rec.scapy_pkt_util.strip_layer3("dst", p_index=i) self.logger.debug(pkt_dst_ip) if pkt_dst_ip != expected_dst_ip: pkt_rec[i].show() diff --git a/tests/TestSuite_ipv4_reassembly.py b/tests/TestSuite_ipv4_reassembly.py index 58b3fe3e..b27cc3d4 100644 --- a/tests/TestSuite_ipv4_reassembly.py +++ b/tests/TestSuite_ipv4_reassembly.py @@ -45,18 +45,18 @@ class IpReassemblyTestConfig(object): self.packets_config() def cpu_config(self): - self.eal_para = self.test_case.dut.create_eal_parameters(cores="1S/1C/1T") - self.core_list = self.test_case.dut.get_core_list("1S/1C/1T") + self.eal_para = self.test_case.sut_node.create_eal_parameters(cores="1S/1C/1T") + self.core_list = self.test_case.sut_node.get_core_list("1S/1C/1T") self.core_mask = utils.create_mask(self.core_list) - self.memory_channels = self.test_case.dut.get_memory_channels() + self.memory_channels = self.test_case.sut_node.get_memory_channels() def ports_config(self): - dut_ports = self.test_case.dut.get_ports(self.test_case.nic) - dut_port = dut_ports[0] - tester_port = self.test_case.tester.get_local_port(dut_port) - self.tester_iface = self.test_case.tester.get_interface(tester_port) - self.dut_port_mask = utils.create_mask([dut_port]) - self.queue_config = "({},{},{})".format(dut_port, "0", self.core_list[0]) + sut_ports = self.test_case.sut_node.get_ports(self.test_case.nic) + sut_port = sut_ports[0] + tg_port = 
self.test_case.tg_node.get_local_port(sut_port) + self.tg_iface = self.test_case.tg_node.get_interface(tg_port) + self.sut_port_mask = utils.create_mask([sut_port]) + self.queue_config = "({},{},{})".format(sut_port, "0", self.core_list[0]) def example_app_config(self): self.maxflows = 1024 @@ -96,29 +96,29 @@ class TestIpReassembly(TestCase): """ Changes the maximum number of frames by modifying the example app code. """ - self.dut.set_build_options( + self.sut_node.set_build_options( {"RTE_LIBRTE_IP_FRAG_MAX_FRAG": int(num_of_fragments)} ) - self.dut.send_expect("export RTE_TARGET=" + self.target, "#") - self.dut.send_expect("export RTE_SDK=`pwd`", "#") - self.dut.send_expect("rm -rf %s" % self.target, "# ", 5) - self.dut.build_install_dpdk(self.target) + self.sut_node.send_expect("export RTE_TARGET=" + self.target, "#") + self.sut_node.send_expect("export RTE_SDK=`pwd`", "#") + self.sut_node.send_expect("rm -rf %s" % self.target, "# ", 5) + self.sut_node.build_install_dpdk(self.target) - def set_tester_iface_mtu(self, iface, mtu=1500): + def set_tg_iface_mtu(self, iface, mtu=1500): """ Set the interface MTU value. """ command = "ip link set mtu {mtu} dev {iface}" - self.tester.send_expect(command.format(**locals()), "#") + self.tg_node.send_expect(command.format(**locals()), "#") def compile_example_app(self): """ Builds the example app and checks for errors. 
""" - self.dut.send_expect("rm -rf examples/ip_reassembly/build", "#") - out = self.dut.build_dpdk_apps("examples/ip_reassembly") + self.sut_node.send_expect("rm -rf examples/ip_reassembly/build", "#") + out = self.sut_node.build_dpdk_apps("examples/ip_reassembly") def execute_example_app(self): """ @@ -127,10 +127,10 @@ class TestIpReassembly(TestCase): command = ( "./%s {eal_para} " % self.app_ip_reassembly_path - + "-- -p {dut_port_mask} " + + "-- -p {sut_port_mask} " + "--maxflows={maxflows} --flowttl={flowttl} {extra_args}" ) - self.dut.send_expect(command.format(**self.test_config.__dict__), "Link [Uu]p") + self.sut_node.send_expect(command.format(**self.test_config.__dict__), "Link [Uu]p") def tcp_ipv4_fragments(self, src_ip, identifier): """ @@ -208,26 +208,26 @@ class TestIpReassembly(TestCase): def scapy_send_packets(self): """ - Calling scapy from the tester board sends the generated PCAP file to - the DUT + Calling scapy from the TG board sends the generated PCAP file to + the SUT """ - self.tester.scapy_append('pcap = rdpcap("%s")' % self.test_config.pcap_file) - self.tester.scapy_append( - 'sendp(pcap, iface="%s", verbose=False)' % self.test_config.tester_iface + self.tg_node.scapy_append('pcap = rdpcap("%s")' % self.test_config.pcap_file) + self.tg_node.scapy_append( + 'sendp(pcap, iface="%s", verbose=False)' % self.test_config.tg_iface ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(5) def send_packets(self): """ - Goes through all the steps to send packets from the tester to the self.dut. - Generates the PCAP file, place it into the tester board, calls scapy and + Goes through all the steps to send packets from the TG to the self.sut_node. + Generates the PCAP file, place it into the TG board, calls scapy and finally removes the PCAP file. 
""" self.create_pcap_file() - self.tester.session.copy_file_to(self.test_config.pcap_file) + self.tg_node.session.copy_file_to(self.test_config.pcap_file) self.scapy_send_packets() os.remove(self.test_config.pcap_file) time.sleep(5) @@ -239,18 +239,18 @@ class TestIpReassembly(TestCase): def tcpdump_start_sniffing(self): """ - Starts tcpdump in the background to sniff the tester interface where - the packets are transmitted to and from the self.dut. + Starts tcpdump in the background to sniff the TG interface where + the packets are transmitted to and from the self.sut_node. All the captured packets are going to be stored in a file for a post-analysis. """ command = ( "tcpdump -w tcpdump.pcap -i %s 2>tcpdump.out &" - % self.test_config.tester_iface + % self.test_config.tg_iface ) - self.tester.send_expect("rm -f tcpdump.pcap", "#") - self.tester.send_expect(command, "#") + self.tg_node.send_expect("rm -f tcpdump.pcap", "#") + self.tg_node.send_expect(command, "#") time.sleep(self.wait_interval_for_tcpdump) def tcpdump_stop_sniff(self): @@ -258,9 +258,9 @@ class TestIpReassembly(TestCase): Stops the tcpdump process running in the background. """ time.sleep(self.wait_interval_for_tcpdump) - self.tester.send_expect("killall tcpdump", "#") + self.tg_node.send_expect("killall tcpdump", "#") # For the [pid]+ Done tcpdump... message after killing the process - self.tester.send_expect("cat tcpdump.out", "#") + self.tg_node.send_expect("cat tcpdump.out", "#") time.sleep(3) def tcpdump_command(self, command): @@ -268,13 +268,13 @@ class TestIpReassembly(TestCase): Sends a tcpdump related command and returns an integer from the output """ - result = self.tester.send_expect(command, "#") + result = self.tg_node.send_expect(command, "#") return int(result.strip().split()[0]) def number_of_received_packets(self, tcp_port): """ By reading the file generated by tcpdump it counts how many packets were - forwarded by the sample app and received in the self.tester. 
The sample app + forwarded by the sample app and received in the self.tg_node. The sample app will add a known MAC address for the test to look for. """ @@ -287,7 +287,7 @@ class TestIpReassembly(TestCase): def number_of_sent_packets(self, mac_src): """ By reading the file generated by tcpdump it counts how many packets were - sent to the DUT searching for a given MAC address. + sent to the SUT searching for a given MAC address. """ command = ( @@ -387,12 +387,12 @@ class TestIpReassembly(TestCase): Builds the sample app and set the shell prompt to a known and value. """ - self.tester.send_expect('export PS1="# "', "#") + self.tg_node.send_expect('export PS1="# "', "#") self.compile_example_app() - self.app_ip_reassembly_path = self.dut.apps_name["ip_reassembly"] - dut_ports = self.dut.get_ports(self.nic) - dut_port = dut_ports[0] - self.destination_mac = self.dut.get_mac_address(dut_port) + self.app_ip_reassembly_path = self.sut_node.apps_name["ip_reassembly"] + sut_ports = self.sut_node.get_ports(self.nic) + sut_port = sut_ports[0] + self.destination_mac = self.sut_node.get_mac_address(sut_port) def test_send_1K_frames_split_in_4_and_1K_maxflows(self): """ @@ -440,13 +440,13 @@ class TestIpReassembly(TestCase): self.execute_example_app() self.send_n_siff_packets() self.verify_all() - self.dut.send_expect("^C", "# ") + self.sut_node.send_expect("^C", "# ") time.sleep(5) self.set_max_num_of_fragments(4) time.sleep(5) except Exception as e: - self.dut.send_expect("^C", "# ") + self.sut_node.send_expect("^C", "# ") time.sleep(2) self.set_max_num_of_fragments() self.compile_example_app() @@ -533,14 +533,14 @@ class TestIpReassembly(TestCase): fragments = self.create_fragments() self.write_shuffled_pcap(fragments[:3]) - self.tester.session.copy_file_to(self.test_config.pcap_file) + self.tg_node.session.copy_file_to(self.test_config.pcap_file) self.scapy_send_packets() os.remove(self.test_config.pcap_file) time.sleep(3) self.write_shuffled_pcap(fragments[3:]) - 
self.tester.session.copy_file_to(self.test_config.pcap_file) + self.tg_node.session.copy_file_to(self.test_config.pcap_file) self.scapy_send_packets() os.remove(self.test_config.pcap_file) @@ -562,13 +562,13 @@ class TestIpReassembly(TestCase): self, payload_size=mtu - 100, fragment_size=2500 ) try: - self.set_tester_iface_mtu(self.test_config.tester_iface, mtu) + self.set_tg_iface_mtu(self.test_config.tg_iface, mtu) self.compile_example_app() self.execute_example_app() self.send_n_siff_packets() self.verify_all() except Exception as e: - self.set_tester_iface_mtu(self.test_config.tester_iface) + self.set_tg_iface_mtu(self.test_config.tg_iface) raise e def test_send_jumbo_frames_with_wrong_arguments(self): @@ -582,7 +582,7 @@ class TestIpReassembly(TestCase): self, payload_size=mtu - 100, fragment_size=2500 ) try: - self.set_tester_iface_mtu(self.test_config.tester_iface, mtu) + self.set_tg_iface_mtu(self.test_config.tg_iface, mtu) self.set_max_num_of_fragments(4) self.compile_example_app() self.execute_example_app() @@ -593,7 +593,7 @@ class TestIpReassembly(TestCase): ) self.verify_all() except Exception as e: - self.set_tester_iface_mtu(self.test_config.tester_iface) + self.set_tg_iface_mtu(self.test_config.tg_iface) raise e def tear_down(self): @@ -601,11 +601,11 @@ class TestIpReassembly(TestCase): Run after each test case. """ - self.dut.send_expect("^C", "# ") + self.sut_node.send_expect("^C", "# ") def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_ixgbe_vf_get_extra_queue_information.py b/tests/TestSuite_ixgbe_vf_get_extra_queue_information.py index cf067d01..1a0d4225 100644 --- a/tests/TestSuite_ixgbe_vf_get_extra_queue_information.py +++ b/tests/TestSuite_ixgbe_vf_get_extra_queue_information.py @@ -25,7 +25,7 @@ class TestIxgbeVfGetExtraInfo(TestCase): """ Get rx queue packets and bytes. 
""" - out = self.vm0_dut.send_expect("ethtool -S %s" % self.vm0_intf0, "#") + out = self.vm0_sut.send_expect("ethtool -S %s" % self.vm0_intf0, "#") lines = out.split("\r\n") for line in lines: @@ -48,16 +48,16 @@ class TestIxgbeVfGetExtraInfo(TestCase): else: rev_num, rev_byte = self.get_packet_bytes("0") - self.tester.scapy_foreground() - self.tester.scapy_append('sys.path.append("./")') - self.vm0_vf0_mac = self.vm0_dut.get_mac_address(0) + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('sys.path.append("./")') + self.vm0_vf0_mac = self.vm0_sut.get_mac_address(0) # send packet with different parameters packet = ( r'sendp([Ether(src="%s",dst="%s")/Dot1Q(prio=%s, vlan=%s)/IP()/Raw("x"*20)], iface="%s")' - % (self.src_mac, self.vm0_vf0_mac, prio, vlan, self.tester_intf) + % (self.src_mac, self.vm0_vf0_mac, prio, vlan, self.tg_intf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) if prio == "1" or prio == "2" or prio == "3": @@ -84,22 +84,22 @@ class TestIxgbeVfGetExtraInfo(TestCase): """ rev_num0, rev_byte0 = self.get_packet_bytes("0") rev_num1, rev_byte1 = self.get_packet_bytes("1") - self.tester.scapy_foreground() - self.tester.scapy_append('sys.path.append("./")') - self.vm0_vf0_mac = self.vm0_dut.get_mac_address(0) + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('sys.path.append("./")') + self.vm0_vf0_mac = self.vm0_sut.get_mac_address(0) # send packet with different parameters if ptype == "ip": packet = ( r'sendp([Ether(src="%s",dst="%s")/IP()/Raw("x"*20)], count=100, iface="%s")' - % (self.src_mac, self.vm0_vf0_mac, self.tester_intf) + % (self.src_mac, self.vm0_vf0_mac, self.tg_intf) ) elif ptype == "udp": packet = ( r'sendp([Ether(src="%s",dst="%s")/IP(src="192.168.0.1", dst="192.168.0.3")/UDP(sport=23,dport=24)/Raw("x"*20)], count=100, iface="%s")' - % (self.src_mac, self.vm0_vf0_mac, self.tester_intf) + % (self.src_mac, 
self.vm0_vf0_mac, self.tg_intf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() rev_num_after0, rev_byte_after0 = self.get_packet_bytes("0") rev_num_after1, rev_byte_after1 = self.get_packet_bytes("1") @@ -125,23 +125,23 @@ class TestIxgbeVfGetExtraInfo(TestCase): self.verify( self.nic in ["IXGBE_10G-82599_SFP"], "NIC Unsupported: " + str(self.nic) ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.cores = "1S/8C/1T" - self.pf_mac = self.dut.get_mac_address(self.dut_ports[0]) - txport = self.tester.get_local_port(self.dut_ports[0]) - self.tester_intf = self.tester.get_interface(txport) - self.tester_mac = self.tester.get_mac(txport) + self.pf_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + txport = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_intf = self.tg_node.get_interface(txport) + self.tg_mac = self.tg_node.get_mac(txport) - self.pf_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] + self.pf_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] self.src_mac = "00:02:00:00:00:01" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") - self.used_dut_port = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver="igb_uio") - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.used_sut_port = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1, driver="igb_uio") + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver("vfio-pci") 
time.sleep(1) @@ -157,54 +157,54 @@ class TestIxgbeVfGetExtraInfo(TestCase): 1pf -> 1vf , vf->vm0 """ vf0_prop_1 = {"opt_host": self.sriov_vfs_port[0].pci} - self.vm0 = QEMUKvm(self.dut, "vm0", "ixgbe_vf_get_extra_queue_information") + self.vm0 = QEMUKvm(self.sut_node, "vm0", "ixgbe_vf_get_extra_queue_information") self.vm0.set_vm_device(driver="vfio-pci", **vf0_prop_1) try: - self.vm0_dut = self.vm0.start() - if self.vm0_dut is None: + self.vm0_sut = self.vm0.start() + if self.vm0_sut is None: raise Exception("Set up VM ENV failed") else: self.verify( - self.vm0_dut.ports_info[0]["intf"] != "N/A", "Not interface" + self.vm0_sut.ports_info[0]["intf"] != "N/A", "Not interface" ) except Exception as e: self.destroy_vm_env() self.logger.error("Failure for %s" % str(e)) - self.vm0_vf0_mac = self.vm0_dut.get_mac_address(0) - self.vm0_intf0 = self.vm0_dut.ports_info[0]["intf"] + self.vm0_vf0_mac = self.vm0_sut.get_mac_address(0) + self.vm0_intf0 = self.vm0_sut.ports_info[0]["intf"] - self.vm0_dut.restore_interfaces_linux() + self.vm0_sut.restore_interfaces_linux() def destroy_vm_env(self): """ destroy vm environment """ if getattr(self, "vm0", None): - self.vm0_dut.kill_all() - self.vm0_dut_ports = None + self.vm0_sut.kill_all() + self.vm0_sut_ports = None self.vm0.stop() self.vm0 = None - self.dut.virt_exit() + self.sut_node.virt_exit() def destroy_vf_env(self): """ destroy vf """ - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] - self.used_dut_port = None + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] + self.used_sut_port = None def verify_rx_queue(self, num): """ verify the rx queue number """ # pf up + vf up -> vf up - self.vm0_dut.send_expect("ifconfig %s up" % self.vm0_intf0, "#") + self.vm0_sut.send_expect("ifconfig %s up" % 
self.vm0_intf0, "#") time.sleep(10) - out = self.vm0_dut.send_expect("ethtool -S %s" % self.vm0_intf0, "#") + out = self.vm0_sut.send_expect("ethtool -S %s" % self.vm0_intf0, "#") self.verify(("rx_queue_%d" % (num - 1)) in out, "Wrong rx queue number") time.sleep(3) @@ -213,13 +213,13 @@ class TestIxgbeVfGetExtraInfo(TestCase): DPDK PF, kernel VF, enable DCB mode with TC=4 """ # start testpmd with PF on the host - self.dut_testpmd = PmdOutput(self.dut) - self.dut_testpmd.start_testpmd( + self.sut_testpmd = PmdOutput(self.sut_node) + self.sut_testpmd.start_testpmd( "%s" % self.cores, "--rxq=4 --txq=4 --nb-cores=4", "-a %s" % self.pf_pci ) - self.dut_testpmd.execute_cmd("port stop 0") - self.dut_testpmd.execute_cmd("port config 0 dcb vt on 4 pfc off") - self.dut_testpmd.execute_cmd("port start 0") + self.sut_testpmd.execute_cmd("port stop 0") + self.sut_testpmd.execute_cmd("port config 0 dcb vt on 4 pfc off") + self.sut_testpmd.execute_cmd("port start 0") time.sleep(5) self.setup_vm_env() # verify the vf get the extra info. @@ -240,11 +240,11 @@ class TestIxgbeVfGetExtraInfo(TestCase): DPDK PF, kernel VF, disable DCB mode """ # start testpmd with PF on the host - self.dut_testpmd = PmdOutput(self.dut) - self.dut_testpmd.start_testpmd( + self.sut_testpmd = PmdOutput(self.sut_node) + self.sut_testpmd.start_testpmd( "%s" % self.cores, "--rxq=2 --txq=2 --nb-cores=2", "-a %s" % self.pf_pci ) - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("start") time.sleep(5) self.setup_vm_env() # verify the vf get the extra info. @@ -261,7 +261,7 @@ class TestIxgbeVfGetExtraInfo(TestCase): """ Run after each test case. """ - self.dut_testpmd.quit() + self.sut_testpmd.quit() self.destroy_vm_env() time.sleep(2) @@ -270,5 +270,5 @@ class TestIxgbeVfGetExtraInfo(TestCase): Run after each test suite. 
""" self.destroy_vf_env() - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) diff --git a/tests/TestSuite_jumboframes.py b/tests/TestSuite_jumboframes.py index 08963b72..9f83a1f6 100644 --- a/tests/TestSuite_jumboframes.py +++ b/tests/TestSuite_jumboframes.py @@ -45,22 +45,22 @@ class TestJumboframes(TestCase): int(_) for _ in self.jumboframes_get_stat(self.tx_port, "rx") ] - itf = self.tester.get_interface(self.tester.get_local_port(self.tx_port)) - mac = self.dut.get_mac_address(self.tx_port) + itf = self.tg_node.get_interface(self.tg_node.get_local_port(self.tx_port)) + mac = self.sut_node.get_mac_address(self.tx_port) # The packet total size include ethernet header, ip header, and payload. # ethernet header length is 18 bytes, ip standard header length is 20 bytes. pktlen = pktsize - ETHER_HEADER_LEN padding = pktlen - IP_HEADER_LEN - self.tester.scapy_foreground() - self.tester.scapy_append('nutmac="%s"' % mac) - self.tester.scapy_append( + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('nutmac="%s"' % mac) + self.tg_node.scapy_append( 'sendp([Ether(dst=nutmac, src="52:00:00:00:00:00")/IP(len=%s)/Raw(load="\x50"*%s)], iface="%s")' % (pktlen, padding, itf) ) - out = self.tester.scapy_execute() + out = self.tg_node.scapy_execute() sleep(5) tx_pkts, _, tx_bytes = [ @@ -104,32 +104,32 @@ class TestJumboframes(TestCase): """ Prerequisite steps for each test suit. 
""" - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.rx_port = self.dut_ports[0] - self.tx_port = self.dut_ports[0] + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.rx_port = self.sut_ports[0] + self.tx_port = self.sut_ports[0] - cores = self.dut.get_core_list("1S/2C/1T") + cores = self.sut_node.get_core_list("1S/2C/1T") self.coremask = utils.create_mask(cores) self.port_mask = utils.create_mask([self.rx_port, self.tx_port]) - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu %s" % ( - self.tester.get_interface(self.tester.get_local_port(self.rx_port)), + self.tg_node.get_interface(self.tg_node.get_local_port(self.rx_port)), ETHER_JUMBO_FRAME_MTU + 200, ), "# ", ) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) def set_up(self): """ This is to clear up environment before the case run. """ - self.dut.kill_all() + self.sut_node.kill_all() def test_jumboframes_normal_nojumbo(self): """ @@ -141,14 +141,14 @@ class TestJumboframes(TestCase): "--max-pkt-len=%d --port-topology=loop --tx-offloads=0x8000" % (ETHER_STANDARD_MTU), ) - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.jumboframes_send_packet(ETHER_STANDARD_MTU - 1) self.jumboframes_send_packet(ETHER_STANDARD_MTU) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def test_jumboframes_jumbo_nojumbo(self): """ @@ -160,13 +160,13 @@ class TestJumboframes(TestCase): "--max-pkt-len=%d --port-topology=loop --tx-offloads=0x8000" % (ETHER_STANDARD_MTU), ) - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + 
self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.jumboframes_send_packet(ETHER_STANDARD_MTU + 1, False) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def test_jumboframes_normal_jumbo(self): """ @@ -178,14 +178,14 @@ class TestJumboframes(TestCase): "--max-pkt-len=%s --port-topology=loop --tx-offloads=0x8000" % (ETHER_JUMBO_FRAME_MTU), ) - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.jumboframes_send_packet(ETHER_STANDARD_MTU - 1) self.jumboframes_send_packet(ETHER_STANDARD_MTU) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def test_jumboframes_jumbo_jumbo(self): """ @@ -197,15 +197,15 @@ class TestJumboframes(TestCase): "--max-pkt-len=%s --port-topology=loop --tx-offloads=0x8000" % (ETHER_JUMBO_FRAME_MTU), ) - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.jumboframes_send_packet(ETHER_STANDARD_MTU + 1) self.jumboframes_send_packet(ETHER_JUMBO_FRAME_MTU - 1) self.jumboframes_send_packet(ETHER_JUMBO_FRAME_MTU) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def test_jumboframes_bigger_jumbo(self): """ @@ -217,8 +217,8 @@ class TestJumboframes(TestCase): "--max-pkt-len=%s --port-topology=loop --tx-offloads=0x8000" % (ETHER_JUMBO_FRAME_MTU), ) - self.dut.send_expect("set fwd mac", "testpmd> ") - 
self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") """ On 1G NICs, when the jubmo frame MTU set as 9000, the software adjust it to 9004. @@ -233,23 +233,23 @@ class TestJumboframes(TestCase): else: self.jumboframes_send_packet(ETHER_JUMBO_FRAME_MTU + 1, False) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ When the case of this test suite finished, the environment should clear up. """ - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu %s" % ( - self.tester.get_interface(self.tester.get_local_port(self.rx_port)), + self.tg_node.get_interface(self.tg_node.get_local_port(self.rx_port)), ETHER_STANDARD_MTU, ), "# ", diff --git a/tests/TestSuite_keep_alive.py b/tests/TestSuite_keep_alive.py index 32947a97..732ef9f4 100644 --- a/tests/TestSuite_keep_alive.py +++ b/tests/TestSuite_keep_alive.py @@ -20,14 +20,14 @@ class TestKeepAlive(TestCase): Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - cores = self.dut.get_core_list("1S/4C/1T") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + cores = self.sut_node.get_core_list("1S/4C/1T") self.coremask = utils.create_mask(cores) - self.app_l2fwd_keepalive_path = self.dut.apps_name["l2fwd-keepalive"] + self.app_l2fwd_keepalive_path = self.sut_node.apps_name["l2fwd-keepalive"] # build sample app - out = self.dut.build_dpdk_apps("./examples/l2fwd-keepalive") + out = self.sut_node.build_dpdk_apps("./examples/l2fwd-keepalive") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -41,38 +41,38 @@ class TestKeepAlive(TestCase): """ Verify netmap compatibility with one port """ - eal_para = self.dut.create_eal_parameters(cores=list(range(4))) + eal_para = self.sut_node.create_eal_parameters(cores=list(range(4))) cmd = self.app_l2fwd_keepalive_path + " %s -- -q 8 -p ffff -K 10" % eal_para - self.dut.send_expect(cmd, "Port statistics", 60) + self.sut_node.send_expect(cmd, "Port statistics", 60) self.scapy_send_packet(2000) - out = self.dut.get_session_output(timeout=10) + out = self.sut_node.get_session_output(timeout=10) print(out) p = re.compile(r"\d+") result = p.findall(out) - amount = 2000 * len(self.dut_ports) + amount = 2000 * len(self.sut_ports) self.verify(str(amount) in result, "Wrong: can't get <%d> package" % amount) def scapy_send_packet(self, nu): """ Send a packet to port """ - for i in range(len(self.dut_ports)): - txport = self.tester.get_local_port(self.dut_ports[i]) - mac = self.dut.get_mac_address(self.dut_ports[i]) - txItf = self.tester.get_interface(txport) - self.tester.scapy_append( + for i in range(len(self.sut_ports)): + txport = self.tg_node.get_local_port(self.sut_ports[i]) + mac = self.sut_node.get_mac_address(self.sut_ports[i]) + txItf = 
self.tg_node.get_interface(txport) + self.tg_node.scapy_append( 'sendp([Ether(dst="%s")/IP()/UDP()/Raw(\'X\'*18)], iface="%s",count=%s)' % (mac, txItf, nu) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) pass diff --git a/tests/TestSuite_kernelpf_iavf.py b/tests/TestSuite_kernelpf_iavf.py index bb20e687..982f6a13 100644 --- a/tests/TestSuite_kernelpf_iavf.py +++ b/tests/TestSuite_kernelpf_iavf.py @@ -14,8 +14,8 @@ import re import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase from framework.utils import RED @@ -32,8 +32,8 @@ class TestKernelpfIavf(TestCase): supported_vf_driver = ["pci-stub", "vfio-pci"] def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") self.vm0 = None self.env_done = False self.interrupt_flag = False @@ -43,13 +43,13 @@ class TestKernelpfIavf(TestCase): # get driver version self.driver_version = self.nic_obj.driver_version - self.port = self.dut_ports[0] + self.port = self.sut_ports[0] self.vm_port = 0 - cores = self.dut.get_core_list("1S/1C/1T") + cores = self.sut_node.get_core_list("1S/1C/1T") self.port_mask = utils.create_mask([self.port]) # set vf assign method and vf driver - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: self.vf_driver = "pci-stub" @@ -58,22 +58,22 @@ class TestKernelpfIavf(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = 
"vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") - self.used_dut_port = self.dut_ports[0] - self.host_intf = self.dut.ports_info[self.used_dut_port]["intf"] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) - self.tester_mac = self.tester.get_mac(tester_port) + self.sut_node.send_expect("modprobe vfio-pci", "#") + self.used_sut_port = self.sut_ports[0] + self.host_intf = self.sut_node.ports_info[self.used_sut_port]["intf"] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) + self.tg_mac = self.tg_node.get_mac(tg_port) - tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_intf1 = self.tester.get_interface(tester_port1) - self.l3fwdpower_name = self.dut.apps_name["l3fwd-power"].strip().split("/")[-1] + tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_intf1 = self.tg_node.get_interface(tg_port1) + self.l3fwdpower_name = self.sut_node.apps_name["l3fwd-power"].strip().split("/")[-1] # bind to default driver - self.bind_nic_driver(self.dut_ports, driver="") + self.bind_nic_driver(self.sut_ports, driver="") # get priv-flags default stats self.flag = "vf-vlan-pruning" - self.default_stats = self.dut.get_priv_flags_state(self.host_intf, self.flag) + self.default_stats = self.sut_node.get_priv_flags_state(self.host_intf, self.flag) def set_up(self): @@ -94,29 +94,29 @@ class TestKernelpfIavf(TestCase): return # bind to default driver - self.bind_nic_driver(self.dut_ports, driver="") - self.used_dut_port = self.dut_ports[0] + self.bind_nic_driver(self.sut_ports, driver="") + self.used_sut_port = self.sut_ports[0] if self.is_eth_series_nic(800) and self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.host_intf, self.flag), "# " ) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver=driver) - self.sriov_vfs_port = 
self.dut.ports_info[self.used_dut_port]["vfs_port"] - out = self.dut.send_expect("ethtool %s" % self.host_intf, "#") + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] + out = self.sut_node.send_expect("ethtool %s" % self.host_intf, "#") self.speed = int(re.findall("Speed: (\d*)", out)[0]) // 1000 if self.is_eth_series_nic(800): - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % (self.host_intf), "# " ) if self.running_case == "test_vf_multicast": - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s vf-true-promisc-support on" % (self.host_intf), "# ", ) if set_vf_mac is True: self.vf_mac = "00:01:23:45:67:89" - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.host_intf, self.vf_mac), "# " ) @@ -129,13 +129,13 @@ class TestKernelpfIavf(TestCase): vf_popt = {"opt_host": self.sriov_vfs_port[0].pci} # set up VM ENV - self.vm = VM(self.dut, "vm0", "kernelpf_iavf") + self.vm = VM(self.sut_node, "vm0", "kernelpf_iavf") self.vm.set_vm_device(driver=self.vf_assign_method, **vf_popt) - self.vm_dut = self.vm.start() - if self.vm_dut is None: + self.vm_sut = self.vm.start() + if self.vm_sut is None: raise Exception("Set up VM ENV failed!") - self.vm_testpmd = PmdOutput(self.vm_dut) + self.vm_testpmd = PmdOutput(self.vm_sut) except Exception as e: self.destroy_vm_env() raise Exception(e) @@ -143,21 +143,21 @@ class TestKernelpfIavf(TestCase): def destroy_vm_env(self): if getattr(self, "vm", None): - if getattr(self, "vm_dut", None): - self.vm_dut.kill_all() + if getattr(self, "vm_sut", None): + self.vm_sut.kill_all() self.vm_testpmd = None - self.vm_dut_ports = None + self.vm_sut_ports = None # destroy vm0 self.vm.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() time.sleep(3) self.vm = None - if getattr(self, "used_dut_port", None) is not None: - 
self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - self.used_dut_port = None + if getattr(self, "used_sut_port", None) is not None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + self.used_sut_port = None - self.bind_nic_driver(self.dut_ports, driver="default") + self.bind_nic_driver(self.sut_ports, driver="default") self.env_done = False @@ -175,20 +175,20 @@ class TestKernelpfIavf(TestCase): def send_random_pkt(self, dts, count=1): tgen_ports = [] - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) tgen_ports.append((tx_port, rx_port)) - src_mac = self.tester.get_mac(tx_port) + src_mac = self.tg_node.get_mac(tx_port) dst_mac = dts pkt_param = [("ether", {"dst": dst_mac, "src": src_mac})] - result = self.tester.check_random_pkts( + result = self.tg_node.check_random_pkts( tgen_ports, pktnum=count, allow_miss=False, params=pkt_param ) return result def test_vf_basic_rxtx(self): """ - Set rxonly forward,Send 100 random packets from tester, check packets can be received + Set rxonly forward,Send 100 random packets from TG, check packets can be received """ self.vm_testpmd.start_testpmd("all") self.vm_testpmd.execute_cmd("set fwd rxonly") @@ -196,23 +196,23 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("start") self.send_random_pkt(self.vf_mac, count=100) time.sleep(1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(self.vf_mac in out, "vf receive packet fail") stats = self.vm_testpmd.get_pmd_stats(0) self.verify(stats["RX-packets"] >= 100, "vf receive packet num is not match") """ - Set txonly forward,check packets can be received by tester + Set txonly forward,check packets can be received by TG """ self.vm_testpmd.execute_cmd("stop") self.vm_testpmd.execute_cmd("set fwd txonly") - 
self.tester.send_expect("rm -f tcpdump.pcap", "#") - self.tester.send_expect("tcpdump -i %s 2>tcpdump.out &" % self.tester_intf, "#") + self.tg_node.send_expect("rm -f tcpdump.pcap", "#") + self.tg_node.send_expect("tcpdump -i %s 2>tcpdump.out &" % self.tg_intf, "#") self.vm_testpmd.execute_cmd("start") time.sleep(1) self.vm_testpmd.execute_cmd("stop") - self.tester.send_expect("killall tcpdump", "#") + self.tg_node.send_expect("killall tcpdump", "#") time.sleep(1) - cap_packet = self.tester.send_expect("cat tcpdump.out", "#", 30) + cap_packet = self.tg_node.send_expect("cat tcpdump.out", "#", 30) stats = self.vm_testpmd.get_pmd_stats(0) cap_tcp_num = re.findall("(\d+) packets", cap_packet) nums = sum(map(int, cap_tcp_num)) @@ -272,38 +272,38 @@ class TestKernelpfIavf(TestCase): """ Enable kernel trust mode """ - self.dut.send_expect("ip link set dev %s vf 0 trust on" % self.host_intf, "# ") + self.sut_node.send_expect("ip link set dev %s vf 0 trust on" % self.host_intf, "# ") self.vm_testpmd.start_testpmd("all") self.vm_testpmd.execute_cmd("set fwd mac") self.vm_testpmd.execute_cmd("set verbose 1") self.vm_testpmd.execute_cmd("start") # send packet with current mac, vf can receive and forward packet self.send_random_pkt(self.vf_mac, count=1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(self.vf_mac in out, "vf receive pkt fail with current mac") # send packet with wrong mac, vf can receive and forward packet self.send_random_pkt(self.wrong_mac, count=1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(self.wrong_mac in out, "vf receive pkt fail with wrong mac") self.vm_testpmd.execute_cmd("set promisc all off") # send packet with current mac, vf can receive and forward packet self.send_random_pkt(self.vf_mac, count=1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(self.vf_mac in out, "vf receive pkt fail with current mac") # 
send packet with wrong mac, vf can not receive and forward packet self.send_random_pkt(self.wrong_mac, count=1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(self.wrong_mac not in out, "vf receive pkt with wrong mac") self.vm_testpmd.execute_cmd("set promisc all on") # send packet with current mac, vf can receive and forward packet self.send_random_pkt(self.vf_mac, count=1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(self.vf_mac in out, "vf receive pkt fail with current mac") # send packet with wrong mac, vf can receive and forward packet self.send_random_pkt(self.wrong_mac, count=1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(self.wrong_mac in out, "vf receive pkt fail with wrong mac") def test_vf_multicast(self): @@ -311,7 +311,7 @@ class TestKernelpfIavf(TestCase): enable kernel trust mode """ multicast_mac = "01:80:C2:00:00:08" - self.dut.send_expect("ip link set dev %s vf 0 trust on" % self.host_intf, "# ") + self.sut_node.send_expect("ip link set dev %s vf 0 trust on" % self.host_intf, "# ") self.vm_testpmd.start_testpmd("all") self.vm_testpmd.execute_cmd("set fwd mac") self.vm_testpmd.execute_cmd("set promisc all off") @@ -319,18 +319,18 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("set verbose 1") self.vm_testpmd.execute_cmd("start") self.send_random_pkt(self.vf_mac, count=1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(self.vf_mac in out, "vf receive pkt fail with current mac") self.send_random_pkt(multicast_mac, count=1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(multicast_mac not in out, "vf receive pkt with multicast mac") self.vm_testpmd.execute_cmd("set allmulti all on") self.send_random_pkt(self.vf_mac, count=1) - out = self.vm_dut.get_session_output() + out = 
self.vm_sut.get_session_output() self.verify(self.vf_mac in out, "vf receive pkt fail with current mac") self.send_random_pkt(multicast_mac, count=1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify(multicast_mac in out, "vf receive pkt fail with multicast mac") def test_vf_broadcast(self): @@ -343,10 +343,10 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("start") self.send_random_pkt(broadcast_mac, count=1) time.sleep(1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() print(out) self.verify( - broadcast_mac.upper() in out and self.tester_mac.upper() in out, + broadcast_mac.upper() in out and self.tg_mac.upper() in out, "vf receive pkt fail with broadcast mac", ) @@ -355,35 +355,35 @@ class TestKernelpfIavf(TestCase): vf can receive packet with right vlan id, can't receive wrong vlan id packet """ random_vlan = random.randint(1, MAX_VLAN) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 vlan %s" % (self.host_intf, random_vlan), "# " ) - out = self.dut.send_expect("ip link show %s" % self.host_intf, "# ") + out = self.sut_node.send_expect("ip link show %s" % self.host_intf, "# ") self.verify("vlan %d" % random_vlan in out, "Failed to add pvid on VF0") self.vm_testpmd.start_testpmd("all") self.vm_testpmd.execute_cmd("set fwd mac") self.vm_testpmd.execute_cmd("set verbose 1") self.vm_testpmd.execute_cmd("start") - self.start_tcpdump(self.tester_intf) + self.start_tcpdump(self.tg_intf) out = self.send_and_getout(vlan=random_vlan, pkt_type="VLAN_UDP") tcpdump_out = self.get_tcpdump_package() self.verify(self.vf_mac in out, "testpmd can't receive packet") receive_pkt = re.findall("vlan %s" % random_vlan, tcpdump_out) self.verify(len(receive_pkt) == 2, "Failed to received vlan packet!!!") wrong_vlan = (random_vlan + 1) % 4096 - self.start_tcpdump(self.tester_intf) + self.start_tcpdump(self.tg_intf) out = self.send_and_getout(vlan=wrong_vlan, 
pkt_type="VLAN_UDP") tcpdump_out = self.get_tcpdump_package() self.verify(self.vf_mac not in out, "received wrong vlan packet!!!") receive_pkt = re.findall("vlan %s" % wrong_vlan, tcpdump_out) - self.verify(len(receive_pkt) == 1, "tester received wrong vlan packet!!!") + self.verify(len(receive_pkt) == 1, "TG received wrong vlan packet!!!") # remove vlan self.vm_testpmd.execute_cmd("stop") self.vm_testpmd.execute_cmd("port stop all") - self.dut.send_expect("ip link set %s vf 0 vlan 0" % self.host_intf, "# ") - out = self.dut.send_expect("ip link show %s" % self.host_intf, "# ") + self.sut_node.send_expect("ip link set %s vf 0 vlan 0" % self.host_intf, "# ") + out = self.sut_node.send_expect("ip link show %s" % self.host_intf, "# ") self.verify("vlan %d" % random_vlan not in out, "Failed to remove pvid on VF0") # send packet without vlan self.vm_testpmd.execute_cmd("port reset 0") @@ -397,29 +397,29 @@ class TestKernelpfIavf(TestCase): self.verify(self.vf_mac in out, "Not recevied packet with vlan 0!!!") # send random vlan packet - self.start_tcpdump(self.tester_intf) + self.start_tcpdump(self.tg_intf) out = self.send_and_getout(vlan=random_vlan, pkt_type="VLAN_UDP") tcpdump_out = self.get_tcpdump_package() receive_pkt = re.findall("vlan %s" % random_vlan, tcpdump_out) if self.kdriver == "i40e" and self.driver_version < "2.13.10": - self.verify(len(receive_pkt) == 2, "fail to tester received vlan packet!!!") + self.verify(len(receive_pkt) == 2, "fail to TG received vlan packet!!!") self.verify(self.vf_mac in out, "Failed to received vlan packet!!!") else: - self.verify(len(receive_pkt) == 1, "fail to tester received vlan packet!!!") + self.verify(len(receive_pkt) == 1, "fail to TG received vlan packet!!!") self.verify(self.vf_mac not in out, "Received vlan packet!!!") def send_and_getout(self, vlan=0, pkt_type="UDP"): if pkt_type == "UDP": - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": self.vf_mac}) + scapy_pkt_builder = 
ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": self.vf_mac}) elif pkt_type == "VLAN_UDP": - pkt = Packet(pkt_type="VLAN_UDP") - pkt.config_layer("vlan", {"vlan": vlan}) - pkt.config_layer("ether", {"dst": self.vf_mac}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP") + scapy_pkt_builder.config_layer("vlan", {"vlan": vlan}) + scapy_pkt_builder.config_layer("ether", {"dst": self.vf_mac}) - pkt.send_pkt(self.tester, tx_port=self.tester_intf) - out = self.vm_dut.get_session_output(timeout=2) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf) + out = self.vm_sut.get_session_output(timeout=2) return out @@ -441,7 +441,7 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("rx_vlan add 1 0") # send packet vlan 1, vf can receive packet - self.start_tcpdump(self.tester_intf) + self.start_tcpdump(self.tg_intf) out = self.send_and_getout(vlan=1, pkt_type="VLAN_UDP") tcpdump_out = self.get_tcpdump_package() receive_pkt = re.findall("vlan 1", tcpdump_out) @@ -488,7 +488,7 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("set verbose 1") self.vm_testpmd.execute_cmd("start") - self.start_tcpdump(self.tester_intf) + self.start_tcpdump(self.tg_intf) out = self.send_and_getout(pkt_type="UDP") tcpdump_out = self.get_tcpdump_package() receive_pkt = re.findall("vlan %s" % random_vlan, tcpdump_out) @@ -509,7 +509,7 @@ class TestKernelpfIavf(TestCase): # enable strip self.vm_testpmd.execute_cmd("vlan set strip on 0") - self.start_tcpdump(self.tester_intf) + self.start_tcpdump(self.tg_intf) self.send_and_getout(vlan=random_vlan, pkt_type="VLAN_UDP") tcpdump_out = self.get_tcpdump_package() self.verify( @@ -521,7 +521,7 @@ class TestKernelpfIavf(TestCase): # disable strip self.vm_testpmd.execute_cmd("vlan set strip off 0") - self.start_tcpdump(self.tester_intf) + self.start_tcpdump(self.tg_intf) self.send_and_getout(vlan=random_vlan, pkt_type="VLAN_UDP") tcpdump_out = self.get_tcpdump_package() 
self.verify( @@ -558,7 +558,7 @@ class TestKernelpfIavf(TestCase): # disable filter self.vm_testpmd.execute_cmd("rx_vlan rm %d 0" % random_vlan) self.vm_testpmd.execute_cmd("vlan set filter off 0") - self.start_tcpdump(self.tester_intf) + self.start_tcpdump(self.tg_intf) self.send_and_getout(vlan=random_vlan, pkt_type="VLAN_UDP") time.sleep(1) tcpdump_out = self.get_tcpdump_package() @@ -569,8 +569,8 @@ class TestKernelpfIavf(TestCase): self.verify(len(receive_pkt) == 1, "Failed to received vlan packet!!!") def test_vf_without_jumboframe(self): - self.tester.send_expect( - "ifconfig %s mtu %s" % (self.tester_intf, ETHER_JUMBO_FRAME_MTU), "#" + self.tg_node.send_expect( + "ifconfig %s mtu %s" % (self.tg_intf, ETHER_JUMBO_FRAME_MTU), "#" ) self.vm_testpmd.start_testpmd("all") @@ -578,13 +578,13 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("start") self.jumboframes_send_packet(ETHER_STANDARD_MTU - 1, True) self.jumboframes_send_packet(ETHER_STANDARD_MTU + 1 + 4 + 4, False) - self.tester.send_expect( - "ifconfig %s mtu %s" % (self.tester_intf, ETHER_STANDARD_MTU), "#" + self.tg_node.send_expect( + "ifconfig %s mtu %s" % (self.tg_intf, ETHER_STANDARD_MTU), "#" ) def test_vf_with_jumboframe(self): - self.tester.send_expect( - "ifconfig %s mtu %d" % (self.tester_intf, ETHER_JUMBO_FRAME_MTU), "#" + self.tg_node.send_expect( + "ifconfig %s mtu %d" % (self.tg_intf, ETHER_JUMBO_FRAME_MTU), "#" ) conf_pkt_len = 3000 self.vm_testpmd.start_testpmd( @@ -595,8 +595,8 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("start") self.jumboframes_send_packet(conf_pkt_len - 1, True) self.jumboframes_send_packet(conf_pkt_len + 1, False) - self.tester.send_expect( - "ifconfig %s mtu %d" % (self.tester_intf, ETHER_STANDARD_MTU), "#" + self.tg_node.send_expect( + "ifconfig %s mtu %d" % (self.tg_intf, ETHER_STANDARD_MTU), "#" ) def jumboframes_send_packet(self, pktsize, received=True): @@ -610,10 +610,10 @@ class TestKernelpfIavf(TestCase): int(_) for _ in 
self.jumboframes_get_stat(self.vm_port, "rx") ] - pkt = Packet(pkt_type="UDP", pkt_len=pktsize) - pkt.config_layer("ether", {"dst": self.vf_mac, "src": self.tester_mac}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=pktsize) + scapy_pkt_builder.config_layer("ether", {"dst": self.vf_mac, "src": self.tg_mac}) self.vm_testpmd.execute_cmd("clear port stats all") - pkt.send_pkt(self.tester, tx_port=self.tester_intf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf) time.sleep(1) @@ -657,9 +657,9 @@ class TestKernelpfIavf(TestCase): for type in rss_type: self.vm_testpmd.execute_cmd("port config all rss %s" % type) self.vm_testpmd.execute_cmd("start") - self.send_packet(self.tester_intf, "IPV4&%s" % type) + self.send_packet(self.tg_intf, "IPV4&%s" % type) time.sleep(1) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() self.verify_packet_number(out) self.vm_testpmd.execute_cmd("clear port stats all") @@ -685,11 +685,11 @@ class TestKernelpfIavf(TestCase): if pkt == "": pkt = ( "sendp([Ether(dst='%s')/IP(src='1.2.3.4')/Raw(load='X'*30)], iface='%s')" - % (self.vf_mac, self.tester_intf) + % (self.vf_mac, self.tg_intf) ) - self.tester.scapy_append(pkt) - self.tester.scapy_execute() - out = self.vm_dut.get_session_output() + self.tg_node.scapy_append(pkt) + self.tg_node.scapy_execute() + out = self.vm_sut.get_session_output() p = re.compile("RSS hash=(0x\w+) - RSS queue=(0x\w+)") pkt_info = p.findall(out) self.verify(pkt_info, "received pkt have no hash") @@ -726,8 +726,8 @@ class TestKernelpfIavf(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IP(src="192.168.0.%d", ' 'dst="192.168.0.%d")], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "IPV4&tcp": for i in range(30): @@ -736,8 +736,8 @@ class TestKernelpfIavf(TestCase): 
'TCP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "IPV4&udp": for i in range(30): @@ -746,10 +746,10 @@ class TestKernelpfIavf(TestCase): 'UDP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(1) def enable_hw_checksum(self): @@ -781,15 +781,15 @@ class TestKernelpfIavf(TestCase): } # Send packet. - self.tester.scapy_foreground() + self.tg_node.scapy_foreground() for packet_type in list(packets_sent.keys()): - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([%s], iface="%s")' - % (packets_sent[packet_type], self.tester_intf) + % (packets_sent[packet_type], self.tg_intf) ) - self.start_tcpdump(self.tester_intf) - self.tester.scapy_execute() + self.start_tcpdump(self.tg_intf) + self.tg_node.scapy_execute() time.sleep(1) tcpdump_out = self.get_tcpdump_package() if packet_type == "IP/UDP": @@ -833,13 +833,13 @@ class TestKernelpfIavf(TestCase): self.checksum_verify() def test_vf_tso(self): - self.tester.send_expect( + self.tg_node.send_expect( "ethtool -K %s rx off tx off tso off gso off gro off lro off" - % self.tester_intf, + % self.tg_intf, "#", ) - self.tester.send_expect( - "ifconfig %s mtu %d" % (self.tester_intf, ETHER_JUMBO_FRAME_MTU), "#" + self.tg_node.send_expect( + "ifconfig %s mtu %d" % (self.tg_intf, ETHER_JUMBO_FRAME_MTU), "#" ) self.vm_testpmd.start_testpmd( "all", "--port-topology=chained --max-pkt-len=%d" % ETHER_JUMBO_FRAME_MTU @@ -850,15 +850,15 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("tso set 1460 0") self.vm_testpmd.execute_cmd("port start all") self.vm_testpmd.execute_cmd("start") - 
self.tester.scapy_foreground() + self.tg_node.scapy_foreground() time.sleep(5) - self.start_tcpdump(self.tester_intf) + self.start_tcpdump(self.tg_intf) pkt = ( 'sendp([Ether(dst="%s")/IP(chksum=0x1234)/TCP(flags=0x10,chksum=0x1234)/' - 'Raw(RandString(5214))], iface="%s")' % (self.vf_mac, self.tester_intf) + 'Raw(RandString(5214))], iface="%s")' % (self.vf_mac, self.tg_intf) ) - self.tester.scapy_append(pkt) - self.tester.scapy_execute() + self.tg_node.scapy_append(pkt) + self.tg_node.scapy_execute() time.sleep(5) out = self.get_tcpdump_package() self.verify_packet_segmentation(out) @@ -868,16 +868,16 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("port start all") self.vm_testpmd.execute_cmd("start") - self.start_tcpdump(self.tester_intf) - self.tester.scapy_append(pkt) - self.tester.scapy_execute() + self.start_tcpdump(self.tg_intf) + self.tg_node.scapy_append(pkt) + self.tg_node.scapy_execute() time.sleep(5) out = self.get_tcpdump_package() self.verify_packet_segmentation(out, seg=False) def start_tcpdump(self, rxItf): - self.tester.send_expect("rm -rf getPackageByTcpdump.cap", "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf getPackageByTcpdump.cap", "#") + self.tg_node.send_expect( "tcpdump -A -nn -e -vv -w getPackageByTcpdump.cap -i %s 2> /dev/null& " % rxItf, "#", @@ -886,8 +886,8 @@ class TestKernelpfIavf(TestCase): def get_tcpdump_package(self): time.sleep(1) - self.tester.send_expect("killall tcpdump", "#") - return self.tester.send_expect( + self.tg_node.send_expect("killall tcpdump", "#") + return self.tg_node.send_expect( "tcpdump -A -nn -e -vv -r getPackageByTcpdump.cap", "#" ) @@ -900,12 +900,12 @@ class TestKernelpfIavf(TestCase): ) else: self.verify("length 1460: HTTP" not in out, "packet has segment") - # tester send packet with incorrect checksum + # TG send packet with incorrect checksum # vf fwd packet with corrent checksum self.verify( "incorrect" in out and "correct" in out, "checksum has incorrect" ) - 
self.tester.send_expect("^C", "#") + self.tg_node.send_expect("^C", "#") def test_vf_port_start_stop(self): self.vm_testpmd.start_testpmd("all") @@ -966,26 +966,26 @@ class TestKernelpfIavf(TestCase): def test_vf_rx_interrupt(self): # build l3-power - out = self.dut.build_dpdk_apps("./examples/l3fwd-power") + out = self.sut_node.build_dpdk_apps("./examples/l3fwd-power") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") - self.bind_nic_driver(self.dut_ports, driver="") + self.bind_nic_driver(self.sut_ports, driver="") self.create_2vf_in_host() # start l3fwd-power - l3fwd_app = self.dut.apps_name["l3fwd-power"] + l3fwd_app = self.sut_node.apps_name["l3fwd-power"] cmd = l3fwd_app + " -l 6,7 -n 4 -- -p 0x3 --config " + "'(0,0,6),(1,0,7)'" - self.dut.send_expect(cmd, "POWER", timeout=40) - out = self.dut.get_session_output() + self.sut_node.send_expect(cmd, "POWER", timeout=40) + out = self.sut_node.get_session_output() print(out) pattern = re.compile(r"(([a-f0-9]{2}:){5}[a-f0-9]{2})") mac_list = pattern.findall(out.lower()) vf0_mac = mac_list[0][0] vf1_mac = mac_list[1][0] # send packet to vf0 and vf1 - self.scapy_send_packet(vf0_mac, self.tester_intf) - self.scapy_send_packet(vf1_mac, self.tester_intf1) - out = self.dut.get_session_output() + self.scapy_send_packet(vf0_mac, self.tg_intf) + self.scapy_send_packet(vf1_mac, self.tg_intf1) + out = self.sut_node.get_session_output() self.verify( "L3FWD_POWER: lcore 6 is waked up from rx interrupt" in out, "lcore 6 is not waked up", @@ -1002,9 +1002,9 @@ class TestKernelpfIavf(TestCase): "L3FWD_POWER: lcore 7 sleeps until interrupt triggers" in out, "lcore 7 not sleep", ) - self.scapy_send_packet(vf0_mac, self.tester_intf, count=16) - self.scapy_send_packet(vf1_mac, self.tester_intf1, count=16) - out = self.dut.get_session_output() + self.scapy_send_packet(vf0_mac, self.tg_intf, count=16) + self.scapy_send_packet(vf1_mac, self.tg_intf1, count=16) + out = 
self.sut_node.get_session_output() self.verify( "L3FWD_POWER: lcore 6 is waked up from rx interrupt" in out, "lcore 6 is not waked up", @@ -1013,7 +1013,7 @@ class TestKernelpfIavf(TestCase): "L3FWD_POWER: lcore 7 is waked up from rx interrupt" in out, "lcore 7 is not waked up", ) - self.dut.send_expect( + self.sut_node.send_expect( "killall %s" % self.l3fwdpower_name, "# ", 60, alt_session=True ) @@ -1025,13 +1025,13 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("set allmulti all off") self.vm_testpmd.execute_cmd("set fwd mac") self.vm_testpmd.execute_cmd("start") - self.scapy_send_packet(self.wrong_mac, self.tester_intf, count=10) - out = self.vm_dut.get_session_output() + self.scapy_send_packet(self.wrong_mac, self.tg_intf, count=10) + out = self.vm_sut.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 0, "Not receive expected packet") - self.scapy_send_packet(self.vf_mac, self.tester_intf, count=10) - out = self.vm_dut.get_session_output() + self.scapy_send_packet(self.vf_mac, self.tg_intf, count=10) + out = self.vm_sut.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -1047,8 +1047,8 @@ class TestKernelpfIavf(TestCase): self.vm_testpmd.execute_cmd("start") # send 10 tagged packets, and check 10 tagged packets received - self.scapy_send_packet(self.vf_mac, self.tester_intf, vlan_flags=True, count=10) - out = self.vm_dut.get_session_output() + self.scapy_send_packet(self.vf_mac, self.tg_intf, vlan_flags=True, count=10) + out = self.vm_sut.get_session_output() packets = len(re.findall("received 1 packets", out)) if self.kdriver == "i40e" and self.driver_version < "2.13.10": self.verify(packets == 10, "Not receive expected packet") @@ -1056,8 +1056,8 @@ class TestKernelpfIavf(TestCase): self.verify(packets == 0, "Receive expected packet") # send 10 untagged packets, and check 10 untagged packets received - 
self.scapy_send_packet(self.vf_mac, self.tester_intf, count=10) - out = self.vm_dut.get_session_output() + self.scapy_send_packet(self.vf_mac, self.tg_intf, count=10) + out = self.vm_sut.get_session_output() packets = len(re.findall("received 1 packets", out)) self.verify(packets == 10, "Not receive expected packet") @@ -1066,34 +1066,34 @@ class TestKernelpfIavf(TestCase): Send a packet to port """ if count == 1: - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(dst="%s")/IP()/UDP()/' "Raw('X'*18)], iface=\"%s\")" % (mac, testinterface) ) else: for i in range(count): if vlan_flags: - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(dst="%s")/Dot1Q(id=0x8100, vlan=100)/IP(dst="127.0.0.%d")/UDP()/Raw(\'X\'*18)], ' 'iface="%s")' % (mac, i, testinterface) ) else: - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(dst="%s")/IP(dst="127.0.0.%d")/UDP()/Raw(\'X\'*18)], ' 'iface="%s")' % (mac, i, testinterface) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def create_2vf_in_host(self, driver=""): - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] - - self.used_dut_port_1 = self.dut_ports[1] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 1, driver=driver) - self.sriov_vfs_port_1 = self.dut.ports_info[self.used_dut_port_1]["vfs_port"] - self.dut.send_expect("modprobe vfio", "#") - self.dut.send_expect("modprobe vfio-pci", "#") + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + + self.used_sut_port_1 = self.sut_ports[1] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_1, 1, driver=driver) + self.sriov_vfs_port_1 = 
self.sut_node.ports_info[self.used_sut_port_1]["vfs_port"] + self.sut_node.send_expect("modprobe vfio", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") for port in self.sriov_vfs_port_0: port.bind_driver("vfio-pci") @@ -1101,19 +1101,19 @@ class TestKernelpfIavf(TestCase): port.bind_driver("vfio-pci") def destroy_2vf_in_2pf(self): - if getattr(self, "used_dut_port_0", None) is not None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) - self.used_dut_port_0 = None - if getattr(self, "used_dut_port_1", None) is not None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_1) - self.used_dut_port_1 = None + if getattr(self, "used_sut_port_0", None) is not None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) + self.used_sut_port_0 = None + if getattr(self, "used_sut_port_1", None) is not None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_1) + self.used_sut_port_1 = None def tear_down(self): """ Run after each test case. """ if self.running_case == "test_vf_rx_interrupt": - self.dut.send_expect( + self.sut_node.send_expect( "killall %s" % self.l3fwdpower_name, "# ", 60, alt_session=True ) self.destroy_2vf_in_2pf() @@ -1123,8 +1123,8 @@ class TestKernelpfIavf(TestCase): if self.running_case == "test_vf_mac_filter": self.destroy_vm_env() if self.running_case == "test_vf_add_pvid": - self.dut.send_expect("ip link set %s vf 0 vlan 0" % self.host_intf, "# ") - self.dut.send_expect("ip link set dev %s vf 0 trust off" % self.host_intf, "# ") + self.sut_node.send_expect("ip link set %s vf 0 vlan 0" % self.host_intf, "# ") + self.sut_node.send_expect("ip link set dev %s vf 0 trust off" % self.host_intf, "# ") def tear_down_all(self): """ @@ -1135,7 +1135,7 @@ class TestKernelpfIavf(TestCase): self.destroy_vm_env() if self.is_eth_series_nic(800) and self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.host_intf, self.flag, self.default_stats), "# ", diff 
--git a/tests/TestSuite_kni.py b/tests/TestSuite_kni.py index e04a0cbc..f84e6bc5 100644 --- a/tests/TestSuite_kni.py +++ b/tests/TestSuite_kni.py @@ -13,12 +13,12 @@ import re import time from random import randint -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream -dut_ports = [] +sut_ports = [] port_virtual_interaces = [] ports_without_kni = 2 @@ -425,18 +425,18 @@ class TestKni(TestCase): KNI Prerequisites """ - out = self.dut.send_expect("which brctl", "# ") + out = self.sut_node.send_expect("which brctl", "# ") self.verify( "no brctl" not in out, "The linux tool brctl is needed to run this test suite", ) - out = self.dut.build_dpdk_apps("./examples/kni") - self.app_kni_path = self.dut.apps_name["kni"] + out = self.sut_node.build_dpdk_apps("./examples/kni") + self.app_kni_path = self.sut_node.apps_name["kni"] self.verify("Error" not in out, "Compilation failed") - p0_pci = self.dut.ports_info[0]["pci"] + p0_pci = self.sut_node.ports_info[0]["pci"] numa_node = int( - self.dut.send_expect( + self.sut_node.send_expect( "cat /sys/bus/pci/devices/%s/numa_node" % p0_pci, "# ", 30 ) ) @@ -470,12 +470,12 @@ class TestKni(TestCase): self.extract_ports_cores_config(default_1_port_cores_config) out = self.start_kni() self.verify("Error" not in out, "Error found during kni start") - out = self.dut.send_expect("cat /etc/os-release", "# ") + out = self.sut_node.send_expect("cat /etc/os-release", "# ") if "Ubuntu" in out: - self.dut.send_expect("ufw disable", "# ") + self.sut_node.send_expect("ufw disable", "# ") else: - self.dut.send_expect("service iptables stop", "# ") - self.dut.send_expect("service firewalld stop", "# ") + self.sut_node.send_expect("service iptables stop", "# ") + self.sut_node.send_expect("service firewalld stop", "# ") # get dts 
output path if self.logger.log_path.startswith(os.sep): @@ -484,7 +484,7 @@ class TestKni(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -503,16 +503,16 @@ class TestKni(TestCase): if kthread_mode is not None: module_param += "kthread_mode=%s" % kthread_mode - self.dut.kill_all() - out = self.dut.send_expect("rmmod rte_kni", "# ", 10) + self.sut_node.kill_all() + out = self.sut_node.send_expect("rmmod rte_kni", "# ", 10) self.verify("in use" not in out, "Error unloading KNI module: " + out) if self.drivername == "igb_uio": - self.dut.send_expect("rmmod igb_uio", "# ", 5) - self.dut.send_expect( + self.sut_node.send_expect("rmmod igb_uio", "# ", 5) + self.sut_node.send_expect( "insmod ./%s/kmod/igb_uio.ko" % (self.target), "# ", 20 ) - self.dut.bind_interfaces_linux(self.drivername) - out = self.dut.send_expect( + self.sut_node.bind_interfaces_linux(self.drivername) + out = self.sut_node.send_expect( "insmod ./%s/kmod/rte_kni.ko %s" % (self.target, module_param), "# ", 10 ) @@ -527,12 +527,12 @@ class TestKni(TestCase): config_param = self.build_config_param() - eal_para = self.dut.create_eal_parameters( + eal_para = self.sut_node.create_eal_parameters( cores=self.config["rx_cores"] + self.config["tx_cores"] + self.config["kernel_cores"] ) - out_kni = self.dut.send_expect( + out_kni = self.sut_node.send_expect( "./%s %s -- -P -p %s %s -m &" % (self.app_kni_path, eal_para, port_mask, config_param), "Link [Uu]p", @@ -542,7 +542,7 @@ class TestKni(TestCase): time.sleep(5) if kthread_mode == "single": kthread_mask = utils.create_mask(self.config["kernel_cores"]) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "taskset -p `pgrep -fl kni_single | awk '{print $1}'`", "#" ) 
self.verify("current affinity mask" in out, "Unable to set core affinity") @@ -555,7 +555,7 @@ class TestKni(TestCase): """ ports_cores_pattern = re.compile(ports_cores_template) port_configs = ports_cores_pattern.findall(ports_cores_config) - dut_ports = self.dut.get_ports(self.nic) + sut_ports = self.sut_node.get_ports(self.nic) config = {} ports = [] @@ -567,20 +567,20 @@ class TestKni(TestCase): details = {} port_number = int(port_config[0]) - self.verify(port_number < len(dut_ports), "Not enough ports available") + self.verify(port_number < len(sut_ports), "Not enough ports available") - ports.append(dut_ports[port_number]) - details["port"] = dut_ports[port_number] - rx_cores.append(self.dut.get_lcore_id(port_config[1])) - details["rx_core"] = self.dut.get_lcore_id(port_config[1]) - tx_cores.append(self.dut.get_lcore_id(port_config[2])) - details["tx_core"] = self.dut.get_lcore_id(port_config[2]) + ports.append(sut_ports[port_number]) + details["port"] = sut_ports[port_number] + rx_cores.append(self.sut_node.get_lcore_id(port_config[1])) + details["rx_core"] = self.sut_node.get_lcore_id(port_config[1]) + tx_cores.append(self.sut_node.get_lcore_id(port_config[2])) + details["tx_core"] = self.sut_node.get_lcore_id(port_config[2]) details["kernel_cores"] = [] for k_core in port_config[3:]: if k_core != "": - k_cores.append(self.dut.get_lcore_id(k_core)) - details["kernel_cores"].append(self.dut.get_lcore_id(k_core)) + k_cores.append(self.sut_node.get_lcore_id(k_core)) + details["kernel_cores"].append(self.sut_node.get_lcore_id(k_core)) port_details.append(details) @@ -646,16 +646,16 @@ class TestKni(TestCase): """ return "vEth%d_%d" % (port, sub_port) - def dut_physical_cores(self): + def sut_physical_cores(self): """ Returns the number of physical cores in socket 0. 
""" - dut_cores = self.dut.get_all_cores() + sut_cores = self.sut_node.get_all_cores() - first_core = dut_cores[0] + first_core = sut_cores[0] cores = [] - for core in dut_cores[1:]: + for core in sut_cores[1:]: if core["core"] not in cores and core["socket"] == first_core["socket"]: cores.append(core["core"]) @@ -666,13 +666,13 @@ class TestKni(TestCase): Create allow list with ports. """ allow_list = [] - dut_ports = self.dut.get_ports(self.nic) - self.dut.restore_interfaces() - allPort = self.dut.ports_info + sut_ports = self.sut_node.get_ports(self.nic) + self.sut_node.restore_interfaces() + allPort = self.sut_node.ports_info if self.drivername in ["igb_uio"]: - self.dut.send_expect("insmod ./" + self.target + "/kmod/igb_uio.ko", "#") + self.sut_node.send_expect("insmod ./" + self.target + "/kmod/igb_uio.ko", "#") for port in range(0, len(allPort)): - if port in dut_ports: + if port in sut_ports: allow_list.append(allPort[port]["pci"]) return allow_list @@ -689,7 +689,7 @@ class TestKni(TestCase): # Ports and cores configuration set in set_up_all function # Check that all virtual interfaces support ifconfig calls. - out = self.dut.send_expect("ifconfig -a", "# ") + out = self.sut_node.send_expect("ifconfig -a", "# ") for port in self.config["ports"]: virtual_interface = self.virtual_interface_name(port) self.verify( @@ -703,17 +703,17 @@ class TestKni(TestCase): # some time, the virtual interface stats is up when it create # so should set down before set up. 
- self.dut.send_expect("ifconfig %s down" % virtual_interface, "# ") - out = self.dut.send_expect("ifconfig %s up" % virtual_interface, "# ") + self.sut_node.send_expect("ifconfig %s down" % virtual_interface, "# ") + out = self.sut_node.send_expect("ifconfig %s up" % virtual_interface, "# ") self.verify( "Configure network interface of %d up" % port in out, "ifconfig up not supported", ) # Add an IPv6 address - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ifconfig %s add fe80::%d" % (virtual_interface, port + 1), "# " ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ip -family inet6 address show dev %s" % virtual_interface, "# " ) self.verify( @@ -722,10 +722,10 @@ class TestKni(TestCase): ) # Delete the IPv6 address - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ifconfig %s del fe80::%d" % (virtual_interface, port + 1), "# " ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ip -family inet6 address show dev %s" % virtual_interface, "# " ) self.verify( @@ -734,12 +734,12 @@ class TestKni(TestCase): ) # Add an IPv4 address - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ifconfig %s 192.168.%d.1 netmask 255.255.255.192" % (virtual_interface, port), "# ", ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ip -family inet address show dev %s" % virtual_interface, "# " ) self.verify( @@ -748,13 +748,13 @@ class TestKni(TestCase): ) # Set the MTU - out = self.dut.send_expect("ifconfig %s mtu 1300" % virtual_interface, "# ") - out = self.dut.send_expect("ip link show %s" % virtual_interface, "# ") + out = self.sut_node.send_expect("ifconfig %s mtu 1300" % virtual_interface, "# ") + out = self.sut_node.send_expect("ip link show %s" % virtual_interface, "# ") self.verify("mtu 1300" in out, "mtu setup not supported") # Bring down - self.dut.send_expect("ifconfig %s down" % virtual_interface, "# ") - out = self.dut.send_expect( + self.sut_node.send_expect("ifconfig 
%s down" % virtual_interface, "# ") + out = self.sut_node.send_expect( "ip -family inet6 address show dev %s" % virtual_interface, "# " ) self.verify("inet6 addr" not in out, "ifconfig down not supported") @@ -765,54 +765,54 @@ class TestKni(TestCase): """ # Ports and cores configuration set in set_up_all function - # Setup IP address on virtual interfaces and tester ports - self.dut.kill_all() + # Setup IP address on virtual interfaces and TG ports + self.sut_node.kill_all() self.start_kni() ports_ips = {} for port in self.config["ports"]: virtual_interface = self.virtual_interface_name(port) - tx_port = self.tester.get_local_port(port) - tx_interface = self.tester.get_interface(tx_port) - out = self.dut.send_expect("ifconfig %s up" % virtual_interface, "# ") + tx_port = self.tg_node.get_local_port(port) + tx_interface = self.tg_node.get_interface(tx_port) + out = self.sut_node.send_expect("ifconfig %s up" % virtual_interface, "# ") time.sleep(5) v_intf_ip = f"192.168.{port}.1" tx_intf_ip = f"192.168.{port}.2" - self.dut.send_expect( + self.sut_node.send_expect( "ifconfig %s %s netmask 255.255.255.192" % (virtual_interface, v_intf_ip), "# ", ) - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s %s netmask 255.255.255.192" % (tx_interface, tx_intf_ip), "# ", ) ports_ips[port] = [tx_intf_ip, v_intf_ip] - self.tester.enable_ipv6(tx_interface) + self.tg_node.enable_ipv6(tx_interface) time.sleep(5) # Send ping requests and check for answers for port in self.config["ports"]: tx_intf_ip, v_intf_ip = ports_ips[port] - tx_port = self.tester.get_local_port(port) - tx_interface = self.tester.get_interface(tx_port) + tx_port = self.tg_node.get_local_port(port) + tx_interface = self.tg_node.get_interface(tx_port) virtual_interface = self.virtual_interface_name(port) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ping -w 2 -I %s 192.168.%d.2" % (v_intf_ip, port), "# ", 10 ) - out1 = self.dut.send_expect( + out1 = self.sut_node.send_expect( 
"ping -w 2 -I %s 192.168.%d.2" % (virtual_interface, port), "# ", 10 ) expected_str = "64 bytes from 192.168.%d.2:" % port self.verify( any([expected_str in out, expected_str in out1]), "ping not supported" ) - out = self.tester.send_expect( + out = self.tg_node.send_expect( "ping -w 1 -I %s 192.168.%d.1" % (tx_intf_ip, port), "# ", 10 ) - out1 = self.tester.send_expect( + out1 = self.tg_node.send_expect( "ping -w 1 -I %s 192.168.%d.1" % (tx_interface, port), "# ", 10 ) expected_str = "64 bytes from 192.168.%d.1:" % port @@ -820,54 +820,54 @@ class TestKni(TestCase): any([expected_str in out, expected_str in out1]), "kni cannot reply ping packet", ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ping -w 1 -I %s 192.168.%d.123" % (v_intf_ip, port), "# ", 10 ) - out1 = self.dut.send_expect( + out1 = self.sut_node.send_expect( "ping -w 1 -I %s 192.168.%d.123" % (virtual_interface, port), "# ", 10 ) expected_str = "0 received, 100% packet loss" self.verify( all([expected_str in out, expected_str in out1]), "ping not supported" ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ip -family inet6 address show dev %s | awk '/inet6/ { print $2 }'| cut -d'/' -f1" % virtual_interface, "# ", 10, ) - out1 = self.tester.send_expect( + out1 = self.tg_node.send_expect( "ip -family inet6 address show dev %s | awk '/inet6/ { print $2 }'| cut -d'/' -f1" % tx_interface, "# ", 10, ) if out.strip() == "": - self.dut.send_expect( + self.sut_node.send_expect( "ip -6 addr add fe80::742e:c5ef:bb9:b4c8/64 dev %s" % virtual_interface, "# ", 3, ) if out1.strip() == "": - self.tester.send_expect( + self.tg_node.send_expect( "ip -6 addr add fe80::742e:c5ef:bb9:b4c9/64 dev %s" % tx_interface, "# ", 3, ) time.sleep(3) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ip -family inet6 address show dev %s | awk '/inet6/ { print $2 }'| cut -d'/' -f1" % virtual_interface, "# ", 10, ) ipv6_address = out.split("\r\n")[0] - 
self.tester.send_expect("ifconfig %s up" % tx_interface, "# ") - out = self.dut.send_expect( + self.tg_node.send_expect("ifconfig %s up" % tx_interface, "# ") + out = self.sut_node.send_expect( "ping6 -w 1 -I %s %s" % (v_intf_ip, str(ipv6_address)), "# ", 10 ) - out1 = self.dut.send_expect( + out1 = self.sut_node.send_expect( "ping6 -w 1 %s%%%s" % (str(ipv6_address), virtual_interface), "# ", 10 ) # FC25 ping6 output info is "64 bytes from ipv6_address%v: icmp_seq=1 ttl=64" @@ -876,10 +876,10 @@ class TestKni(TestCase): self.verify( any([expected_str in out, expected_str in out1]), "ping6 not supported" ) - out = self.tester.send_expect( + out = self.tg_node.send_expect( "ping6 -w 1 -I %s %s" % (tx_intf_ip, str(ipv6_address)), "# ", 10 ) - out1 = self.tester.send_expect( + out1 = self.tg_node.send_expect( "ping6 -w 1 %s%%%s" % (str(ipv6_address), tx_interface), "# ", 10 ) expected_str = "64 bytes from %s" % ipv6_address @@ -895,25 +895,25 @@ class TestKni(TestCase): ipv6list[-1] = str(j) break - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ping6 -w 1 -I %s %s" % (v_intf_ip, "".join(ipv6list)), "# ", 10 ) - out1 = self.dut.send_expect( + out1 = self.sut_node.send_expect( "ping6 -w 1 %s%%%s" % ("".join(ipv6list), virtual_interface), "# ", 10 ) expected_str = "0 received, 100% packet loss" self.verify( any([expected_str in out, expected_str in out1]), "ping6 not supported" ) - # remove ip from tester - self.tester.send_expect( + # remove ip from TG + self.tg_node.send_expect( "ip addr del 192.168.%d.2 dev %s" % (port, tx_interface), "# " ) for port in self.config["ports"]: - tx_port = self.tester.get_local_port(port) - tx_interface = self.tester.get_interface(tx_port) - self.tester.disable_ipv6(tx_interface) + tx_port = self.tg_node.get_local_port(port) + tx_interface = self.tg_node.get_interface(tx_port) + self.tg_node.disable_ipv6(tx_interface) time.sleep(1) def test_tcpdump(self): @@ -922,28 +922,28 @@ class TestKni(TestCase): """ # Ports and 
cores configuration set in set_up_all function - self.dut.kill_all() + self.sut_node.kill_all() self.start_kni() file_name = "packet.log" for port in self.config["ports"]: - self.dut.send_expect(f"rm -rf {file_name}", "#") + self.sut_node.send_expect(f"rm -rf {file_name}", "#") virtual_interface = self.virtual_interface_name(port) - tx_port = self.tester.get_local_port(port) - rx_mac = self.dut.get_mac_address(port) - tx_mac = self.tester.get_mac(tx_port) - tx_interface = self.tester.get_interface(tx_port) + tx_port = self.tg_node.get_local_port(port) + rx_mac = self.sut_node.get_mac_address(port) + tx_mac = self.tg_node.get_mac(tx_port) + tx_interface = self.tg_node.get_interface(tx_port) - self.dut.send_expect("ifconfig %s up" % virtual_interface, "# ") + self.sut_node.send_expect("ifconfig %s up" % virtual_interface, "# ") # ensure virtual_interface link up self.verify( - self.dut.is_interface_up(intf=virtual_interface), + self.sut_node.is_interface_up(intf=virtual_interface), "Wrong link status, should be up", ) # Start tcpdump with filters for src and dst MAC address, this avoids # unwanted broadcast, ICPM6... 
packets - out = self.dut.send_expect( + out = self.sut_node.send_expect( 'tcpdump -i %s -e -w %s "ether src %s and ether dst %s"' % (virtual_interface, file_name, tx_mac, rx_mac), "listening on %s" % virtual_interface, @@ -958,15 +958,15 @@ class TestKni(TestCase): 'sendp([Ether(src=srcmac,dst=dstmac)/("W"*46)],iface="%s")', ] - self.tester.scapy_append('dstmac="%s"' % rx_mac) - self.tester.scapy_append('srcmac="%s"' % tx_mac) + self.tg_node.scapy_append('dstmac="%s"' % rx_mac) + self.tg_node.scapy_append('srcmac="%s"' % tx_mac) for packet in packets_to_send: - self.tester.scapy_append(packet % tx_interface) + self.tg_node.scapy_append(packet % tx_interface) - self.tester.scapy_execute() + self.tg_node.scapy_execute() - out = self.dut.send_expect("^C", "# ", 20) + out = self.sut_node.send_expect("^C", "# ", 20) self.verify( "%d packets captured" % len(packets_to_send) in out, @@ -979,7 +979,7 @@ class TestKni(TestCase): """ rx_match = "RX packets.(\d+)" - self.dut.kill_all() + self.sut_node.kill_all() self.start_kni(lo_mode="lo_mode_ring_skb") # Ports and cores configuration set in set_up_all function @@ -988,20 +988,20 @@ class TestKni(TestCase): virtual_interface = self.virtual_interface_name(port) - out = self.dut.send_expect("ifconfig %s up" % virtual_interface, "# ") + out = self.sut_node.send_expect("ifconfig %s up" % virtual_interface, "# ") # ensure virtual_interface up self.verify( - self.dut.is_interface_up(intf=virtual_interface), + self.sut_node.is_interface_up(intf=virtual_interface), "virtual_interface should be up", ) - out = self.dut.send_expect("ifconfig %s" % virtual_interface, "# ") + out = self.sut_node.send_expect("ifconfig %s" % virtual_interface, "# ") m = re.search(rx_match, out) previous_rx_packets = int(m.group(1)) - tx_port = self.tester.get_local_port(port) - rx_mac = self.dut.get_mac_address(port) - tx_mac = self.tester.get_mac(tx_port) - tx_interface = self.tester.get_interface(tx_port) + tx_port = self.tg_node.get_local_port(port) + 
rx_mac = self.sut_node.get_mac_address(port) + tx_mac = self.tg_node.get_mac(tx_port) + tx_interface = self.tg_node.get_interface(tx_port) scapy_str = [ 'Ether(src = "%s",dst="%s")/IP()/UDP()/("X"*28)' % (tx_mac, rx_mac), @@ -1011,16 +1011,16 @@ class TestKni(TestCase): 'Ether(src = "%s",dst="%s")/("X"*46)' % (tx_mac, rx_mac), ] - pkt = packet.Packet() - pkt.update_pkt(scapy_str) - # ensure tester's interface up + scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() + scapy_pkt_builder.update_pkt(scapy_str) + # ensure TG's interface up self.verify( - self.tester.is_interface_up(intf=tx_interface), - "Tester's interface should be up", + self.tg_node.is_interface_up(intf=tx_interface), + "TG's interface should be up", ) - pkt.send_pkt(self.tester, tx_port=tx_interface, count=200) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=tx_interface, count=200) - out = self.dut.send_expect("ifconfig %s" % virtual_interface, "# ") + out = self.sut_node.send_expect("ifconfig %s" % virtual_interface, "# ") m = re.search(rx_match, out) rx_packets = int(m.group(1)) @@ -1029,17 +1029,17 @@ class TestKni(TestCase): "Rx statistics error in iface %s" % virtual_interface, ) - self.dut.kill_all() + self.sut_node.kill_all() def test_stress(self): """ KNI stress test. """ self.extract_ports_cores_config(default_2_port_cores_config) - self.dut.send_expect("dmesg -c", "]# ") # Clean the dmesg ring buffer + self.sut_node.send_expect("dmesg -c", "]# ") # Clean the dmesg ring buffer for i in range(stress_test_iterations + stress_test_random_iterations): - self.dut.kill_all() + self.sut_node.kill_all() if i < stress_test_iterations: step = stress_modes_output[i % len(stress_modes_output)] @@ -1053,7 +1053,7 @@ class TestKni(TestCase): self.verify("Error" not in out, "Error found during kni start") # kni setup out info by kernel debug function. so should re-build kernel. 
# now not check kni setup out info, only check kni setup ok and setup no error output - out = self.dut.send_expect("ps -aux", "]# ") + out = self.sut_node.send_expect("ps -aux", "]# ") self.verify("kni" not in out, "kni process setup failed") except: # some permutations have to fail @@ -1063,7 +1063,7 @@ class TestKni(TestCase): """ KNI loopback performance """ - self.dut.kill_all() + self.sut_node.kill_all() header = loopback_perf_results_header for size in packet_sizes_loopback: @@ -1081,10 +1081,10 @@ class TestKni(TestCase): + self.config["rx_cores"] + self.config["kernel_cores"] ) - if total_cores > self.dut_physical_cores(): + if total_cores > self.sut_physical_cores(): self.logger.info( "Skipping step %s (%d cores needed, got %d)" - % (step["config"], total_cores, self.dut_physical_cores()) + % (step["config"], total_cores, self.sut_physical_cores()) ) continue @@ -1102,28 +1102,28 @@ class TestKni(TestCase): tgen_input = [] for port in self.config["ports"]: - rx_mac = self.dut.get_mac_address(port) - tx_port = self.tester.get_local_port(port) - self.tester.scapy_append('dstmac = "%s"' % rx_mac) - self.tester.scapy_append( + rx_mac = self.sut_node.get_mac_address(port) + tx_port = self.tg_node.get_local_port(port) + self.tg_node.scapy_append('dstmac = "%s"' % rx_mac) + self.tg_node.scapy_append( 'flows = [Ether(dst=dstmac)/IP()/("X"*%d)]' % payload_size ) pcap = os.sep.join( - [self.output_path, "tester{0}.pcap".format(tx_port)] + [self.output_path, "tg{0}.pcap".format(tx_port)] ) - self.tester.scapy_append('wrpcap("%s",flows)' % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s",flows)' % pcap) + self.tg_node.scapy_execute() tgen_input.append((tx_port, tx_port, pcap)) time.sleep(1) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, 
self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) pps_results.append(float(pps) / 1000000) @@ -1136,7 +1136,7 @@ class TestKni(TestCase): ] + pps_results self.result_table_add(results_row) - self.dut.kill_all() + self.sut_node.kill_all() self.result_table_print() @@ -1146,13 +1146,13 @@ class TestKni(TestCase): """ self.result_table_create(bridge_perf_results_header) - self.tester.scapy_append('srcmac="00:00:00:00:00:01"') + self.tg_node.scapy_append('srcmac="00:00:00:00:00:01"') pcap = os.sep.join([self.output_path, "kni.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src=srcmac, dst="ff:ff:ff:ff:ff:ff")/IP(len=46)/UDP()/("X"*18)])' % pcap ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() for step in bridge_performance_steps: @@ -1163,10 +1163,10 @@ class TestKni(TestCase): + self.config["rx_cores"] + self.config["kernel_cores"] ) - if total_cores > self.dut_physical_cores(): + if total_cores > self.sut_physical_cores(): self.logger.info( "Skipping step %s (%d cores needed, got %d)" - % (step["config"], total_cores, self.dut_physical_cores()) + % (step["config"], total_cores, self.sut_physical_cores()) ) continue @@ -1177,40 +1177,40 @@ class TestKni(TestCase): self.virtual_interface_name(port["port"], i) ) - self.dut.kill_all() + self.sut_node.kill_all() self.start_kni(lo_mode=None, kthread_mode=step["kthread_mode"]) for virtual_interace in port_virtual_interaces: - out = self.dut.send_expect("ifconfig %s up" % virtual_interace, "# ") + out = self.sut_node.send_expect("ifconfig %s up" % virtual_interace, "# ") self.verify("ERROR" not in out, "Virtual interface not found") - self.dut.send_expect('brctl addbr "br_kni"', "# ") + self.sut_node.send_expect('brctl addbr "br_kni"', "# ") for virtual_interace in port_virtual_interaces: - out = self.dut.send_expect( + 
out = self.sut_node.send_expect( "brctl addif br_kni %s" % virtual_interace, "# " ) self.verify("ERROR" not in out, "Device not found") - self.dut.send_expect("ifconfig br_kni up", "# ") + self.sut_node.send_expect("ifconfig br_kni up", "# ") - tx_port = self.tester.get_local_port(self.config["ports"][0]) - rx_port = self.tester.get_local_port(self.config["ports"][1]) + tx_port = self.tg_node.get_local_port(self.config["ports"][0]) + rx_port = self.tg_node.get_local_port(self.config["ports"][1]) tgenInput = [] tgenInput.append((tx_port, rx_port, pcap)) if step["flows"] == 2: tgenInput.append((rx_port, tx_port, pcap)) - self.verify(self.dut.is_interface_up(intf="br_kni"), "br_kni should be up") + self.verify(self.sut_node.is_interface_up(intf="br_kni"), "br_kni should be up") # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) step["pps"] = float(pps) / 10**6 results_row = [ @@ -1222,8 +1222,8 @@ class TestKni(TestCase): self.result_table_add(results_row) - self.dut.send_expect("ifconfig br_kni down", "# ") - self.dut.send_expect('brctl delbr "br_kni"', "# ", 10) + self.sut_node.send_expect("ifconfig br_kni down", "# ") + self.sut_node.send_expect('brctl delbr "br_kni"', "# ", 10) self.result_table_print() @@ -1233,22 +1233,22 @@ class TestKni(TestCase): """ self.result_table_create(bridge_perf_no_kni_results_header) - self.dut.kill_all() + self.sut_node.kill_all() - dut_ports = self.dut.get_ports(self.nic) + sut_ports = self.sut_node.get_ports(self.nic) - self.tester.scapy_append('srcmac="00:00:00:00:00:01"') + self.tg_node.scapy_append('srcmac="00:00:00:00:00:01"') pcap 
= os.sep.join([self.output_path, "kni.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src=srcmac, dst="ff:ff:ff:ff:ff:ff")/IP(len=46)/UDP()/("X"*18)])' % pcap ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() allow_list = self.make_allow_list(self.target, self.nic) port_virtual_interaces = [] for port in allow_list: - information = self.dut.send_expect( + information = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status | grep '%s'" % port, "# " ) data = information.split(" ") @@ -1256,16 +1256,16 @@ class TestKni(TestCase): if field.rfind("if=") != -1: port_virtual_interaces.append(field.replace("if=", "")) - self.dut.send_expect("ifconfig %s up" % port_virtual_interaces[0], "# ") - self.dut.send_expect("ifconfig %s up" % port_virtual_interaces[1], "# ") - self.dut.send_expect('brctl addbr "br1"', "# ") - self.dut.send_expect("brctl addif br1 %s" % port_virtual_interaces[0], "# ") - self.dut.send_expect("brctl addif br1 %s" % port_virtual_interaces[1], "# ") - self.dut.send_expect("ifconfig br1 up", "# ") + self.sut_node.send_expect("ifconfig %s up" % port_virtual_interaces[0], "# ") + self.sut_node.send_expect("ifconfig %s up" % port_virtual_interaces[1], "# ") + self.sut_node.send_expect('brctl addbr "br1"', "# ") + self.sut_node.send_expect("brctl addif br1 %s" % port_virtual_interaces[0], "# ") + self.sut_node.send_expect("brctl addif br1 %s" % port_virtual_interaces[1], "# ") + self.sut_node.send_expect("ifconfig br1 up", "# ") time.sleep(3) - tx_port = self.tester.get_local_port(dut_ports[0]) - rx_port = self.tester.get_local_port(dut_ports[1]) + tx_port = self.tg_node.get_local_port(sut_ports[0]) + rx_port = self.tg_node.get_local_port(sut_ports[1]) for flows in range(1, flows_without_kni + 1): tgen_input = [] @@ -1275,20 +1275,20 @@ class TestKni(TestCase): tgen_input.append((rx_port, tx_port, pcap)) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet 
generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) self.result_table_add([flows, float(pps) / 10**6]) - self.dut.send_expect("ifconfig br1 down", "# ") - self.dut.send_expect('brctl delbr "br1"', "# ", 30) + self.sut_node.send_expect("ifconfig br1 down", "# ") + self.sut_node.send_expect('brctl delbr "br1"', "# ", 30) for port in allow_list: - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b igb_uio %s" % (port), "# " ) self.result_table_print() @@ -1305,7 +1305,7 @@ class TestKni(TestCase): self.result_table_create(header) - self.dut.send_expect("echo 1 > /proc/sys/net/ipv4/ip_forward", "# ") + self.sut_node.send_expect("echo 1 > /proc/sys/net/ipv4/ip_forward", "# ") # Run the test steps for step in routing_performance_steps: @@ -1317,7 +1317,7 @@ class TestKni(TestCase): self.stripped_config_param(), ] - self.dut.kill_all() + self.sut_node.kill_all() self.start_kni() # Set up the IP addresses, routes and arp entries of the virtual @@ -1340,21 +1340,21 @@ class TestKni(TestCase): # Setup IP, ARP and route for each virtual interface for interface in range(len(virtual_interaces[port_number])): - tx_port = self.tester.get_local_port(port_number) + tx_port = self.tg_node.get_local_port(port_number) - self.dut.send_expect( + self.sut_node.send_expect( "ifconfig %s 192.170.%d.1" % (virtual_interaces[port_number][interface], ip_subnet), "# ", ) - self.dut.send_expect( + self.sut_node.send_expect( "route add -net 192.170.%d.0 netmask 255.255.255.0 gw 192.170.%d.1" % (ip_subnet, ip_subnet), "# ", ) - self.dut.send_expect( + self.sut_node.send_expect( "arp -s 192.170.%d.2 %s" - % (ip_subnet, self.tester.get_mac(tx_port)), + % 
(ip_subnet, self.tg_node.get_mac(tx_port)), "# ", ) ip_subnet += 1 @@ -1365,16 +1365,16 @@ class TestKni(TestCase): tgen_input = [] # Test one port - tx_port = self.tester.get_local_port(self.config["ports"][0]) - rx_mac = self.dut.get_mac_address(self.config["ports"][0]) + tx_port = self.tg_node.get_local_port(self.config["ports"][0]) + rx_mac = self.sut_node.get_mac_address(self.config["ports"][0]) port_iterator = 0 cnt = 0 for port in self.config["port_details"]: port_number = port["port"] - rx_mac = self.dut.get_mac_address(port_number) - tx_port = self.tester.get_local_port(port_number) + rx_mac = self.sut_node.get_mac_address(port_number) + tx_port = self.tg_node.get_local_port(port_number) num_interfaces_per_port = len(virtual_interaces[port_number]) @@ -1383,8 +1383,8 @@ class TestKni(TestCase): for interface in range(len(virtual_interaces[port_number])): dst_ip_subnet = (src_ip_subnet + 1) % num_interfaces_per_port dst_ip_subnet += port_iterator * num_interfaces_per_port - self.tester.scapy_append("flows = []") - self.tester.scapy_append( + self.tg_node.scapy_append("flows = []") + self.tg_node.scapy_append( 'flows.append(Ether(dst="%s")/IP(src="192.170.%d.2",dst="192.170.%d.2")/("X"*%d))' % (rx_mac, src_ip_subnet, dst_ip_subnet, payload_size) ) @@ -1392,19 +1392,19 @@ class TestKni(TestCase): pcap = os.sep.join( [self.output_path, "routePerf_{0}.pcap".format(cnt)] ) - self.tester.scapy_append('wrpcap("%s",flows)' % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s",flows)' % pcap) + self.tg_node.scapy_execute() tgen_input.append((tx_port, tx_port, pcap)) cnt += 1 time.sleep(1) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = 
self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) resutls_row.append(float(pps) / 10**6) @@ -1424,12 +1424,12 @@ class TestKni(TestCase): self.result_table_create(header) - self.dut.kill_all() - self.dut.send_expect("rmmod rte_kni", "# ", 20) + self.sut_node.kill_all() + self.sut_node.send_expect("rmmod rte_kni", "# ", 20) - self.dut.send_expect("systemctl stop NetworkManager.service", "# ") + self.sut_node.send_expect("systemctl stop NetworkManager.service", "# ") - dut_ports = self.dut.get_ports(self.nic) + sut_ports = self.sut_node.get_ports(self.nic) allow_list = self.make_allow_list(self.target, self.nic) port_virtual_interaces = [] @@ -1437,7 +1437,7 @@ class TestKni(TestCase): for port in allow_list: # Enables the interfaces - information = self.dut.send_expect( + information = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status | grep '%s'" % port, "# " ) data = information.split(" ") @@ -1445,24 +1445,24 @@ class TestKni(TestCase): if field.rfind("if=") != -1: interface_aux = field.replace("if=", "") port_virtual_interaces.append(interface_aux) - self.dut.send_expect("ifconfig %s up" % interface_aux, "# ") + self.sut_node.send_expect("ifconfig %s up" % interface_aux, "# ") - self.dut.send_expect("echo 1 > /proc/sys/net/ipv4/ip_forward", "# ") + self.sut_node.send_expect("echo 1 > /proc/sys/net/ipv4/ip_forward", "# ") for port in range(0, ports_without_kni): - tx_port = self.tester.get_local_port(dut_ports[port]) - self.dut.send_expect( + tx_port = self.tg_node.get_local_port(sut_ports[port]) + self.sut_node.send_expect( "ifconfig %s 192.170.%d.1 up" % (port_virtual_interaces[port], port + 100), "# ", ) - self.dut.send_expect( + self.sut_node.send_expect( "route add -net 192.170.%d.0 netmask 255.255.255.0 gw 192.170.%d.1" % (port + 100, port + 100), "# ", ) - self.dut.send_expect( - "arp -s 192.170.%d.2 %s" % (port + 100, self.tester.get_mac(tx_port)), + 
self.sut_node.send_expect( + "arp -s 192.170.%d.2 %s" % (port + 100, self.tg_node.get_mac(tx_port)), "# ", ) @@ -1474,16 +1474,16 @@ class TestKni(TestCase): tgen_input = [] # Prepare test with 1 port - tx_port = self.tester.get_local_port(dut_ports[0]) - rx_mac = self.dut.get_mac_address(dut_ports[0]) - self.tester.scapy_append("flows = []") - self.tester.scapy_append( + tx_port = self.tg_node.get_local_port(sut_ports[0]) + rx_mac = self.sut_node.get_mac_address(sut_ports[0]) + self.tg_node.scapy_append("flows = []") + self.tg_node.scapy_append( 'flows.append(Ether(dst="%s")/IP(src="192.170.100.2",dst="192.170.100.2")/("X"*%d))' % (rx_mac, payload_size) ) pcap = os.sep.join([self.output_path, "routePerf_1.pcap"]) - self.tester.scapy_append('wrpcap("%s",flows)' % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s",flows)' % pcap) + self.tg_node.scapy_execute() tgen_input = [] tgen_input.append((tx_port, tx_port, pcap)) @@ -1491,12 +1491,12 @@ class TestKni(TestCase): # Get throughput with 1 port # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) one_port_resutls_row.append(float(pps) / 10**6) self.result_table_add(one_port_resutls_row) @@ -1504,10 +1504,10 @@ class TestKni(TestCase): # Prepare test with 'ports_without_kni' ports cnt = 0 for port in range(ports_without_kni): - rx_mac = self.dut.get_mac_address(dut_ports[port]) - tx_port = self.tester.get_local_port(dut_ports[port]) - self.tester.scapy_append("flows = []") - self.tester.scapy_append( + rx_mac = self.sut_node.get_mac_address(sut_ports[port]) + tx_port = 
self.tg_node.get_local_port(sut_ports[port]) + self.tg_node.scapy_append("flows = []") + self.tg_node.scapy_append( 'flows.append(Ether(dst="%s")/IP(src="192.170.%d.2",dst="192.170.%d.2")/("X"*%d))' % ( rx_mac, @@ -1523,18 +1523,18 @@ class TestKni(TestCase): ] ) tgen_input.append((tx_port, tx_port, pcap)) - self.tester.scapy_append('wrpcap("%s",flows)' % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s",flows)' % pcap) + self.tg_node.scapy_execute() cnt += 1 # Get throughput with 'ports_without_kni' ports # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) two_port_resutls_row.append(float(pps) / 10**6) self.result_table_add(two_port_resutls_row) @@ -1542,7 +1542,7 @@ class TestKni(TestCase): self.result_table_print() for port in allow_list: - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.drivername, port), "# " ) @@ -1552,13 +1552,13 @@ class TestKni(TestCase): """ if self._suite_result.test_case == "test_ping": for port in self.config["ports"]: - tx_port = self.tester.get_local_port(port) - tx_interface = self.tester.get_interface(tx_port) - self.tester.send_expect("ip addr flush %s" % tx_interface, "# ") + tx_port = self.tg_node.get_local_port(port) + tx_interface = self.tg_node.get_interface(tx_port) + self.tg_node.send_expect("ip addr flush %s" % tx_interface, "# ") def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() - self.dut.send_expect("rmmod rte_kni", "# ", 10) + self.sut_node.kill_all() + self.sut_node.send_expect("rmmod rte_kni", "# ", 10) diff --git a/tests/TestSuite_l2fwd.py b/tests/TestSuite_l2fwd.py index 9e8fd2d5..ca2f8e17 100644 --- a/tests/TestSuite_l2fwd.py +++ b/tests/TestSuite_l2fwd.py @@ -10,9 +10,9 @@ import os import time import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestL2fwd(TestCase): @@ -35,18 +35,18 @@ class TestL2fwd(TestCase): self.number_of_ports = 2 self.headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["udp"] - self.dut_ports = self.dut.get_ports_performance(force_different_nic=False) + self.sut_ports = self.sut_node.get_ports_performance(force_different_nic=False) self.verify( - len(self.dut_ports) >= self.number_of_ports, + len(self.sut_ports) >= self.number_of_ports, "Not enough ports for " + self.nic, ) - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) # compile - out = self.dut.build_dpdk_apps("./examples/l2fwd") - self.app_l2fwd_path = self.dut.apps_name["l2fwd"] + out = self.sut_node.build_dpdk_apps("./examples/l2fwd") + self.app_l2fwd_path = self.sut_node.apps_name["l2fwd"] self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") @@ -64,7 +64,7 @@ class TestL2fwd(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -73,42 +73,42 @@ class TestL2fwd(TestCase): pass def quit_l2fwd(self): - self.dut.send_expect("fg", "l2fwd ", 5) - 
self.dut.send_expect("^C", "# ", 5) + self.sut_node.send_expect("fg", "l2fwd ", 5) + self.sut_node.send_expect("^C", "# ", 5) def notest_port_testing(self): """ Check port forwarding. """ # the cases use the first two ports - port_mask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) - eal_params = self.dut.create_eal_parameters() + port_mask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) + eal_params = self.sut_node.create_eal_parameters() - self.dut.send_expect( + self.sut_node.send_expect( "./%s %s -- -q 8 -p %s &" % (self.app_l2fwd_path, eal_params, port_mask), "L2FWD: entering main loop", 60, ) for i in [0, 1]: - tx_port = self.tester.get_local_port(self.dut_ports[i]) - rx_port = self.tester.get_local_port(self.dut_ports[1 - i]) + tx_port = self.tg_node.get_local_port(self.sut_ports[i]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1 - i]) - tx_interface = self.tester.get_interface(tx_port) - rx_interface = self.tester.get_interface(rx_port) + tx_interface = self.tg_node.get_interface(tx_port) + rx_interface = self.tg_node.get_interface(rx_port) - self.tester.scapy_background() - self.tester.scapy_append('p = sniff(iface="%s", count=1)' % rx_interface) - self.tester.scapy_append("number_packets=len(p)") - self.tester.scapy_append("RESULT = str(number_packets)") + self.tg_node.scapy_background() + self.tg_node.scapy_append('p = sniff(iface="%s", count=1)' % rx_interface) + self.tg_node.scapy_append("number_packets=len(p)") + self.tg_node.scapy_append("RESULT = str(number_packets)") - self.tester.scapy_foreground() - self.tester.scapy_append( + self.tg_node.scapy_foreground() + self.tg_node.scapy_append( 'sendp([Ether()/IP()/UDP()/("X"*46)], iface="%s")' % tx_interface ) - self.tester.scapy_execute() - number_packets = self.tester.scapy_get_result() + self.tg_node.scapy_execute() + number_packets = self.tg_node.scapy_get_result() self.verify(number_packets == "1", "Failed to switch L2 frame") self.quit_l2fwd() @@ -118,12 +118,12 
@@ class TestL2fwd(TestCase): Check port forwarding. """ # the cases use the first two ports - port_mask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) - cores = self.dut.get_core_list(self.core_config, socket=self.ports_socket) + port_mask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) + cores = self.sut_node.get_core_list(self.core_config, socket=self.ports_socket) core_mask = utils.create_mask( - self.dut.get_core_list(self.core_config, socket=self.ports_socket) + self.sut_node.get_core_list(self.core_config, socket=self.ports_socket) ) - eal_params = self.dut.create_eal_parameters(cores=cores) + eal_params = self.sut_node.create_eal_parameters(cores=cores) for queues in self.test_queues: command_line = "./%s %s -- -q %s -p %s &" % ( @@ -133,15 +133,15 @@ class TestL2fwd(TestCase): port_mask, ) - self.dut.send_expect(command_line, "L2FWD: entering main loop", 60) + self.sut_node.send_expect(command_line, "L2FWD: entering main loop", 60) tgen_input = [] - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) tgen_input.append((tx_port, rx_port)) - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) - result = self.tester.check_random_pkts( + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + result = self.tg_node.check_random_pkts( tgen_input, allow_miss=False, params=[("ether", {"dst": "%s" % (self.dst_mac)})], @@ -156,15 +156,15 @@ class TestL2fwd(TestCase): """ ports = [] for port in range(self.number_of_ports): - ports.append(self.dut_ports[port]) + ports.append(self.sut_ports[port]) port_mask = utils.create_mask(ports) - cores = self.dut.get_core_list(self.core_config, socket=self.ports_socket) + cores = self.sut_node.get_core_list(self.core_config, socket=self.ports_socket) - eal_params = self.dut.create_eal_parameters(cores=cores) + 
eal_params = self.sut_node.create_eal_parameters(cores=cores) eal_param = "" for i in ports: - eal_param += " -a %s" % self.dut.ports_info[i]["pci"] + eal_param += " -a %s" % self.sut_node.ports_info[i]["pci"] for frame_size in self.frame_sizes: @@ -173,25 +173,25 @@ class TestL2fwd(TestCase): tgen_input = [] cnt = 1 for port in range(self.number_of_ports): - rx_port = self.tester.get_local_port( - self.dut_ports[port % self.number_of_ports] + rx_port = self.tg_node.get_local_port( + self.sut_ports[port % self.number_of_ports] ) - tx_port = self.tester.get_local_port( - self.dut_ports[(port + 1) % self.number_of_ports] + tx_port = self.tg_node.get_local_port( + self.sut_ports[(port + 1) % self.number_of_ports] ) - destination_mac = self.dut.get_mac_address( - self.dut_ports[(port + 1) % self.number_of_ports] + destination_mac = self.sut_node.get_mac_address( + self.sut_ports[(port + 1) % self.number_of_ports] ) pcap = os.sep.join( [self.output_path, "l2fwd_{0}_{1}.pcap".format(port, cnt)] ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(dst="%s")/IP()/UDP()/("X"*%d)])' % (pcap, destination_mac, payload_size) ) tgen_input.append((tx_port, rx_port, pcap)) time.sleep(3) - self.tester.scapy_execute() + self.tg_node.scapy_execute() cnt += 1 for queues in self.test_queues: @@ -204,11 +204,11 @@ class TestL2fwd(TestCase): port_mask, ) - # self.dut.send_expect(command_line, "memory mapped", 60) - self.dut.send_expect(command_line, "L2FWD: entering main loop", 60) + # self.sut_node.send_expect(command_line, "memory mapped", 60) + self.sut_node.send_expect(command_line, "L2FWD: entering main loop", 60) # wait 5 second after l2fwd boot up. # It is aimed to make sure trex detect link up status. 
- if self.tester.is_pktgen: + if self.tg_node.uses_perf_tg: time.sleep(5) info = ( "Executing l2fwd using %s queues, frame size %d and %s setup.\n" @@ -220,12 +220,12 @@ class TestL2fwd(TestCase): self.rst_report(command_line + "\n\n", frame=True, annex=True) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) Mpps = pps / 1000000.0 queues["Mpps"][frame_size] = Mpps @@ -260,8 +260,8 @@ class TestL2fwd(TestCase): """ Run after each test case. """ - self.dut.send_expect("fg", "l2fwd|# ", 5) - self.dut.send_expect("^C", "# ", 5) + self.sut_node.send_expect("fg", "l2fwd|# ", 5) + self.sut_node.send_expect("^C", "# ", 5) def tear_down_all(self): """ diff --git a/tests/TestSuite_l2fwd_cryptodev_func.py b/tests/TestSuite_l2fwd_cryptodev_func.py index 5c4634a8..3d4de755 100644 --- a/tests/TestSuite_l2fwd_cryptodev_func.py +++ b/tests/TestSuite_l2fwd_cryptodev_func.py @@ -18,7 +18,7 @@ from cryptography.hazmat.primitives.ciphers.aead import AESCCM, AESGCM import framework.utils as utils import tests.cryptodev_common as cc -from framework.packet import Packet +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -26,38 +26,38 @@ class TestL2fwdCrypto(TestCase): def set_up_all(self): self.core_config = "1S/3C/1T" self.number_of_ports = 2 - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) self.verify( - len(self.dut_ports) >= self.number_of_ports, + len(self.sut_ports) >= self.number_of_ports, "Not enough ports for " + self.nic, ) - self.ports_socket = 
self.dut.get_numa_id(self.dut_ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.logger.info("core config = " + self.core_config) self.logger.info("number of ports = " + str(self.number_of_ports)) - self.logger.info("dut ports = " + str(self.dut_ports)) + self.logger.info("SUT ports = " + str(self.sut_ports)) self.logger.info("ports_socket = " + str(self.ports_socket)) - self.core_list = self.dut.get_core_list( + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.core_mask = utils.create_mask(self.core_list) - self.port_mask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) + self.port_mask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) - self.tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.rx_port = self.tester.get_local_port(self.dut_ports[1]) + self.tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.rx_port = self.tg_node.get_local_port(self.sut_ports[1]) - self.tx_interface = self.tester.get_interface(self.tx_port) - self.rx_interface = self.tester.get_interface(self.rx_port) + self.tx_interface = self.tg_node.get_interface(self.tx_port) + self.rx_interface = self.tg_node.get_interface(self.rx_port) self.logger.info("core mask = " + self.core_mask) self.logger.info("port mask = " + self.port_mask) self.logger.info("tx interface = " + self.tx_interface) self.logger.info("rx interface = " + self.rx_interface) - self._app_path = self.dut.apps_name["l2fwd-crypto"] + self._app_path = self.sut_node.apps_name["l2fwd-crypto"] - out = self.dut.build_dpdk_apps("./examples/l2fwd-crypto") + out = self.sut_node.build_dpdk_apps("./examples/l2fwd-crypto") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") @@ -382,25 +382,25 @@ class TestL2fwdCrypto(TestCase): self.logger.info("Total Generated {0} Tests".format(len(test_vector_list))) running_case = self.running_case - dut = self.dut.crb["IP"] - 
dut_index = self._suite_result.internals.index(dut) - target_index = self._suite_result.internals[dut_index + 1].index(self.target) - suite_index = self._suite_result.internals[dut_index + 1][ + sut = self.sut_node.node["IP"] + sut_index = self._suite_result.internals.index(sut) + target_index = self._suite_result.internals[sut_index + 1].index(self.target) + suite_index = self._suite_result.internals[sut_index + 1][ target_index + 2 ].index(self.suite_name) if ( running_case - in self._suite_result.internals[dut_index + 1][target_index + 2][ + in self._suite_result.internals[sut_index + 1][target_index + 2][ suite_index + 1 ] ): - case_index = self._suite_result.internals[dut_index + 1][target_index + 2][ + case_index = self._suite_result.internals[sut_index + 1][target_index + 2][ suite_index + 1 ].index(running_case) - self._suite_result.internals[dut_index + 1][target_index + 2][ + self._suite_result.internals[sut_index + 1][target_index + 2][ suite_index + 1 ].pop(case_index + 1) - self._suite_result.internals[dut_index + 1][target_index + 2][ + self._suite_result.internals[sut_index + 1][target_index + 2][ suite_index + 1 ].pop(case_index) @@ -412,27 +412,27 @@ class TestL2fwdCrypto(TestCase): test_vector, core_mask=self.core_mask, port_mask=self.port_mask ) self._suite_result.test_case = "_".join(self.vector) - self.dut.send_expect(cmd_str, "==", 40) + self.sut_node.send_expect(cmd_str, "==", 40) time.sleep(5) payload = self.__format_hex_to_list(test_vector["input"]) - inst = self.tester.tcpdump_sniff_packets( + inst = self.tg_node.tcpdump_sniff_packets( self.rx_interface, filters=[{"layer": "ether", "config": {"dst": "52:00:00:00:00:01"}}], ) PACKET_COUNT = 65 - pkt = Packet() - pkt.assign_layers(["ether", "ipv4", "raw"]) - pkt.config_layer( + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "ipv4", "raw"]) + scapy_pkt_builder.config_layer( "ether", {"src": "52:00:00:00:00:00", "dst": "52:00:00:00:00:01"} ) - 
pkt.config_layer("ipv4", {"src": "192.168.1.1", "dst": "192.168.1.2"}) - pkt.config_layer("raw", {"payload": payload}) - pkt.send_pkt(self.tester, tx_port=self.tx_interface, count=PACKET_COUNT) + scapy_pkt_builder.config_layer("ipv4", {"src": "192.168.1.1", "dst": "192.168.1.2"}) + scapy_pkt_builder.config_layer("raw", {"payload": payload}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tx_interface, count=PACKET_COUNT) - pkt_rec = self.tester.load_tcpdump_sniff_packets(inst) + pkt_rec = self.tg_node.load_tcpdump_sniff_packets(inst) self.logger.info("Send pkgs: {}".format(PACKET_COUNT)) self.logger.info("Receive pkgs: {}".format(len(pkt_rec))) @@ -464,14 +464,14 @@ class TestL2fwdCrypto(TestCase): if test_vector["auth_algo"] == "null": hash_text = str( binascii.b2a_hex( - pkt_rec.pktgen.pkt["Raw"].getfieldval("load") + pkt_rec.scapy_pkt_util.pkt["Raw"].getfieldval("load") ), encoding="utf-8", ) else: hash_text = str( binascii.b2a_hex( - pkt_rec.pktgen.pkt["Padding"].getfieldval("load") + pkt_rec.scapy_pkt_util.pkt["Padding"].getfieldval("load") ), encoding="utf-8", ) @@ -490,7 +490,7 @@ class TestL2fwdCrypto(TestCase): self.logger.info("Packet Size : %d " % (len(test_vector["input"]) // 2)) # Close l2fwd-crypto process - self.dut.kill_all() + self.sut_node.kill_all() if result: self._suite_result.test_case_passed() @@ -500,7 +500,7 @@ class TestL2fwdCrypto(TestCase): self.verify(result, "Test Failed") def tear_down(self): - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): pass diff --git a/tests/TestSuite_l2fwd_jobstats.py b/tests/TestSuite_l2fwd_jobstats.py index d4414013..fe504400 100644 --- a/tests/TestSuite_l2fwd_jobstats.py +++ b/tests/TestSuite_l2fwd_jobstats.py @@ -32,19 +32,19 @@ class TestL2fwdJobstats(TestCase): ], "NIC Unsupported: " + str(self.nic), ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.verify(len(self.dut.cores) >= 4, "Insufficient cores for 
testing") - cores = self.dut.get_core_list("1S/4C/1T") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.verify(len(self.sut_node.cores) >= 4, "Insufficient cores for testing") + cores = self.sut_node.get_core_list("1S/4C/1T") self.coremask = utils.create_mask(cores) - self.eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") - dut_port0 = self.dut_ports[0] - dut_port1 = self.dut_ports[1] - self.tx_ports = [dut_port0, dut_port1] + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/4C/1T") + sut_port0 = self.sut_ports[0] + sut_port1 = self.sut_ports[1] + self.tx_ports = [sut_port0, sut_port1] # build sample app - out = self.dut.build_dpdk_apps("./examples/l2fwd-jobstats") + out = self.sut_node.build_dpdk_apps("./examples/l2fwd-jobstats") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -58,12 +58,12 @@ class TestL2fwdJobstats(TestCase): """ Verify l2fwd jobstats is correct """ - path = self.dut.apps_name["l2fwd-jobstats"] + path = self.sut_node.apps_name["l2fwd-jobstats"] cmd = path + " %s -- -q 2 -p 0x03 -l" % (self.eal_para) - self.dut.send_expect(cmd, "Port statistics", 60) + self.sut_node.send_expect(cmd, "Port statistics", 60) self.scapy_send_packet(100000) - out = self.dut.get_session_output(timeout=10) + out = self.sut_node.get_session_output(timeout=10) print(out) send_packets = re.findall(r"Total packets sent:\s+?(\d+?)\r", out)[-1] @@ -78,14 +78,14 @@ class TestL2fwdJobstats(TestCase): Send a packet to port """ for i in range(len(self.tx_ports)): - txport = self.tester.get_local_port(self.dut_ports[i]) - mac = self.dut.get_mac_address(self.dut_ports[i]) - txItf = self.tester.get_interface(txport) - self.tester.scapy_append( + txport = self.tg_node.get_local_port(self.sut_ports[i]) + mac = self.sut_node.get_mac_address(self.sut_ports[i]) + txItf = self.tg_node.get_interface(txport) + 
self.tg_node.scapy_append( 'sendp([Ether(dst="02:00:00:00:00", src="%s")/IP()/UDP()/Raw(\'X\'*18)], iface="%s",count=%s)' % (mac, txItf, count) ) - self.tester.scapy_execute(timeout=120) + self.tg_node.scapy_execute(timeout=120) def tear_down(self): """ @@ -97,4 +97,4 @@ class TestL2fwdJobstats(TestCase): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_l2tp_esp_coverage.py b/tests/TestSuite_l2tp_esp_coverage.py index c9aaee5b..3303bc63 100644 --- a/tests/TestSuite_l2tp_esp_coverage.py +++ b/tests/TestSuite_l2tp_esp_coverage.py @@ -6,8 +6,8 @@ import re import time import tests.rte_flow_common as rfc -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase, check_supported_nic, skip_unsupported_pkg vf0_mac = "00:11:22:33:44:55" @@ -117,20 +117,20 @@ class L2tpEspCoverage(TestCase): Run at the start of each test suite. 
Generic filter Prerequistites """ - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.pmd_output = PmdOutput(self.dut) - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.used_dut_port = self.dut_ports[0] - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.tx_iface = self.tester.get_interface(localPort) - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf_mac = self.dut.get_mac_address(0) - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.pmd_output = PmdOutput(self.sut_node) + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.used_sut_port = self.sut_ports[0] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.tx_iface = self.tg_node.get_interface(localPort) + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf_mac = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] self.vf_flag = False self.create_iavf() - self.pkt = Packet() + self.scapy_pkt_builder = ScapyPacketBuilder() def set_up(self): """ @@ -140,22 +140,22 @@ class L2tpEspCoverage(TestCase): "test_MAC_IPV4_L2TPv3_HW_checksum_vlan_strip", "test_MAC_IPV4_L2TPv3_SW_checksum_vlan_insertion", ] - self.dut.kill_all() + self.sut_node.kill_all() def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_iavf() if self.is_eth_series_nic(800) and self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.pf_interface, self.flag, self.default_stats), "# ", @@ -164,20 +164,20 @@ class L2tpEspCoverage(TestCase): def create_iavf(self): if self.vf_flag is False: - self.dut.bind_interfaces_linux("ice") + self.sut_node.bind_interfaces_linux("ice") # get priv-flags default stats self.flag = "vf-vlan-pruning" - self.default_stats = self.dut.get_priv_flags_state( + self.default_stats = self.sut_node.get_priv_flags_state( self.pf_interface, self.flag ) if self.is_eth_series_nic(800) and self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.pf_interface, self.flag), "# ", ) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] self.vf_flag = True try: @@ -185,7 +185,7 @@ class L2tpEspCoverage(TestCase): port.bind_driver(self.drivername) self.vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf_interface, vf0_mac), "# " ) except Exception as e: @@ -194,7 +194,7 @@ class L2tpEspCoverage(TestCase): def destroy_iavf(self): if self.vf_flag is True: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) self.vf_flag = False def create_testpmd_command(self, port_info, rx_checksum=0): @@ -210,29 +210,29 @@ class L2tpEspCoverage(TestCase): self.pmd_output.start_testpmd( cores="1S/8C/1T", param=param_str, eal_param="-a %s" % port_pci ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set 
verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) def enable_checksum(self, param_type="hw"): - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("csum set ip %s 0" % param_type, "testpmd> ") - self.dut.send_expect("csum set udp %s 0" % param_type, "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("set fwd csum", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("csum set ip %s 0" % param_type, "testpmd> ") + self.sut_node.send_expect("csum set udp %s 0" % param_type, "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("set fwd csum", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") def checksum_verify(self, packets_sent): # Send packet. 
- self.tester.scapy_foreground() + self.tg_node.scapy_foreground() for packet_type in list(packets_sent.keys()): - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([%s], iface="%s")' % (packets_sent[packet_type], self.tx_iface) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(1) out = self.pmd_output.execute_cmd("stop") bad_ipcsum = self.pmd_output.get_pmd_value("Bad-ipcsum:", out) @@ -295,8 +295,8 @@ class L2tpEspCoverage(TestCase): self.checksum_verify(tv_MAC_IPV6_NAT_T_ESP_chksum) def start_tcpdump(self, rxItf): - self.tester.send_expect("rm -rf getPackageByTcpdump.cap", "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf getPackageByTcpdump.cap", "#") + self.tg_node.send_expect( "tcpdump -A -nn -e -vv -w getPackageByTcpdump.cap -i %s 2> /dev/null& " % rxItf, "#", @@ -305,47 +305,47 @@ class L2tpEspCoverage(TestCase): def get_tcpdump_package(self): time.sleep(1) - self.tester.send_expect("killall tcpdump", "#") - return self.tester.send_expect( + self.tg_node.send_expect("killall tcpdump", "#") + return self.tg_node.send_expect( "tcpdump -A -nn -e -vv -r getPackageByTcpdump.cap", "#" ) def vlan_strip_insertion_verify(self, packets_sent): - # disabel vlan strip, tester will receive the pkt with vlan id - self.dut.send_expect("vlan set filter on 0", "testpmd> ") - self.dut.send_expect("vlan set strip off 0", "testpmd> ") - self.dut.send_expect("rx_vlan add 1 0", "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + # disabel vlan strip, TG will receive the pkt with vlan id + self.sut_node.send_expect("vlan set filter on 0", "testpmd> ") + self.sut_node.send_expect("vlan set strip off 0", "testpmd> ") + self.sut_node.send_expect("rx_vlan add 1 0", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + 
self.sut_node.send_expect("start", "testpmd> ") self.start_tcpdump(self.tx_iface) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([%s], iface="%s")' % (packets_sent["matched vlan"], self.tx_iface) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(1) out = self.pmd_output.execute_cmd("stop") tcpdump_out = self.get_tcpdump_package() receive_pkt = re.findall("vlan 1", tcpdump_out) self.verify(len(receive_pkt) == 2, "vlan id strip off failed") - self.dut.send_expect("start", "testpmd> ") - self.tester.scapy_append( + self.sut_node.send_expect("start", "testpmd> ") + self.tg_node.scapy_append( 'sendp([%s], iface="%s")' % (packets_sent["dismatched vlan"], self.tx_iface) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(1) out = self.pmd_output.execute_cmd("stop") pkts = rfc.get_port_rx_packets_number(out, 0) self.verify(pkts == 0, "vlan id filter failed") - # enable vlan strip, tester will receive the pkt without vlan id - self.dut.send_expect("vlan set strip on 0", "testpmd> ") + # enable vlan strip, TG will receive the pkt without vlan id + self.sut_node.send_expect("vlan set strip on 0", "testpmd> ") self.start_tcpdump(self.tx_iface) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([%s], iface="%s")' % (packets_sent["matched vlan"], self.tx_iface) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(1) out = self.pmd_output.execute_cmd("stop") tcpdump_out = self.get_tcpdump_package() @@ -353,18 +353,18 @@ class L2tpEspCoverage(TestCase): self.verify(len(receive_pkt) == 1, "vlan id strip on failed") # vlan insertion - self.dut.send_expect("vlan set strip off 0", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan set 0 1", "testpmd> ") - self.dut.send_expect("vlan set filter on 0", "testpmd> ") - self.dut.send_expect("rx_vlan add 1 0", "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - 
self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("vlan set strip off 0", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan set 0 1", "testpmd> ") + self.sut_node.send_expect("vlan set filter on 0", "testpmd> ") + self.sut_node.send_expect("rx_vlan add 1 0", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.start_tcpdump(self.tx_iface) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([%s], iface="%s")' % (packets_sent["no vlan"], self.tx_iface) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(1) out = self.pmd_output.execute_cmd("stop") tcpdump_out = self.get_tcpdump_package() @@ -410,41 +410,41 @@ class L2tpEspCoverage(TestCase): """ self.send_packets(pkts, pf_id) time.sleep(1) - out_info = self.dut.get_session_output(timeout=1) + out_info = self.sut_node.get_session_output(timeout=1) out_pkt = self.pmd_output.execute_cmd("stop") out = out_info + out_pkt self.pmd_output.execute_cmd("start") return out def send_packets(self, packets, pf_id=0): - self.pkt.update_pkt(packets) + self.scapy_pkt_builder.update_pkt(packets) tx_port = self.tx_iface - self.pkt.send_pkt(crb=self.tester, tx_port=tx_port) + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_port) @skip_unsupported_pkg(["os default", "wireless"]) def test_MAC_IPV4_L2TPv3_HW_checksum_vlan_strip(self): self.create_testpmd_command(self.vf0_prop, rx_checksum=1) # vlan strip on - self.dut.send_expect("vlan set filter on 0", "testpmd> ") - self.dut.send_expect("vlan set strip on 0", "testpmd> ") - self.dut.send_expect("rx_vlan add 1 0", "testpmd> ") + self.sut_node.send_expect("vlan set filter on 0", "testpmd> ") + self.sut_node.send_expect("vlan set strip on 0", "testpmd> ") + self.sut_node.send_expect("rx_vlan add 1 0", "testpmd> ") self.enable_checksum() # create rule - self.dut.send_expect( + 
self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1 / end actions queue index 1 / mark id 4 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 2 / end actions queue index 2 / mark id 3 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 3 / end actions queue index 3 / mark id 2 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 4 / end actions queue index 4 / mark id 1 / end", "testpmd> ", ) @@ -483,7 +483,7 @@ class L2tpEspCoverage(TestCase): self.verify(len(receive_pkt) == 1, "vlan id strip on failed") # destroy rule - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") # matched vlan id + bad checksum + matched session id pkts = "Ether(dst='00:11:22:33:44:55')/Dot1Q(vlan=1)/IP(proto=115,chksum=0x123)/L2TP(b'\\x00\\x00\\x00\\x01')/Raw('x'*480)" self.start_tcpdump(self.tx_iface) @@ -508,30 +508,30 @@ class L2tpEspCoverage(TestCase): self.create_testpmd_command(self.vf0_prop, rx_checksum=1) # vlan insertion on - self.dut.send_expect("vlan set strip off 0", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan set 0 1", "testpmd> ") - self.dut.send_expect("vlan set filter on 0", "testpmd> ") - self.dut.send_expect("rx_vlan add 1 0", "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("vlan set strip off 0", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan set 0 1", "testpmd> ") + 
self.sut_node.send_expect("vlan set filter on 0", "testpmd> ") + self.sut_node.send_expect("rx_vlan add 1 0", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # create rule - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 1 / end actions queue index 1 / mark id 4 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 2 / end actions queue index 2 / mark id 3 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 3 / end actions queue index 3 / mark id 2 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / l2tpv3oip session_id is 4 / end actions queue index 4 / mark id 1 / end", "testpmd> ", ) @@ -566,7 +566,7 @@ class L2tpEspCoverage(TestCase): self.verify(bad_ipcsum == 1, "bad ip csum check error") # destroy rule - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") # bad checksum + matched session id pkts = "Ether(dst='00:11:22:33:44:55')/IP(proto=115,chksum=0x123)/L2TP(b'\\x00\\x00\\x00\\x01')/Raw('x'*480)" self.start_tcpdump(self.tx_iface) @@ -587,25 +587,25 @@ class L2tpEspCoverage(TestCase): self.create_testpmd_command(self.vf0_prop, rx_checksum=1) # vlan strip on - self.dut.send_expect("vlan set filter on 0", "testpmd> ") - self.dut.send_expect("vlan set strip on 0", "testpmd> ") - self.dut.send_expect("rx_vlan add 1 0", "testpmd> ") + self.sut_node.send_expect("vlan set filter on 0", "testpmd> ") + self.sut_node.send_expect("vlan set strip on 0", "testpmd> ") + self.sut_node.send_expect("rx_vlan add 1 0", 
"testpmd> ") self.enable_checksum() # create rule - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / esp spi is 1 / end actions queue index 1 / mark id 4 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / esp spi is 2 / end actions queue index 2 / mark id 3 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / esp spi is 3 / end actions queue index 3 / mark id 2 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / esp spi is 4 / end actions queue index 4 / mark id 1 / end", "testpmd> ", ) @@ -644,7 +644,7 @@ class L2tpEspCoverage(TestCase): self.verify(len(receive_pkt) == 1, "vlan id strip on failed") # destroy rule - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") # matched vlan id + bad checksum + matched session id pkts = "Ether(dst='00:11:22:33:44:55')/Dot1Q(vlan=1)/IP(proto=50,chksum=0x123)/ESP(spi=1)/Raw('x'*480)" self.start_tcpdump(self.tx_iface) @@ -669,30 +669,30 @@ class L2tpEspCoverage(TestCase): self.create_testpmd_command(self.vf0_prop, rx_checksum=1) # vlan insertion on - self.dut.send_expect("vlan set strip off 0", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan set 0 1", "testpmd> ") - self.dut.send_expect("vlan set filter on 0", "testpmd> ") - self.dut.send_expect("rx_vlan add 1 0", "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("vlan set strip off 0", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan set 0 1", "testpmd> ") + 
self.sut_node.send_expect("vlan set filter on 0", "testpmd> ") + self.sut_node.send_expect("rx_vlan add 1 0", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # create rule - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / esp spi is 1 / end actions queue index 1 / mark id 4 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / esp spi is 2 / end actions queue index 2 / mark id 3 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / esp spi is 3 / end actions queue index 3 / mark id 2 / end", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / esp spi is 4 / end actions queue index 4 / mark id 1 / end", "testpmd> ", ) @@ -726,7 +726,7 @@ class L2tpEspCoverage(TestCase): self.verify(bad_ipcsum == 1, "bad ip csum check error") # destroy rule - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") # bad checksum + matched session id pkts = "Ether(dst='00:11:22:33:44:55')/IP(chksum=0x123)/UDP(dport=4500)/ESP(spi=1)/Raw('x'*480)" self.start_tcpdump(self.tx_iface) diff --git a/tests/TestSuite_l3fwd.py b/tests/TestSuite_l3fwd.py index 815e4f06..06acb016 100644 --- a/tests/TestSuite_l3fwd.py +++ b/tests/TestSuite_l3fwd.py @@ -22,13 +22,13 @@ class TestL3fwd(TestCase, PerfTestBase): L3fwd Prerequisites """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if 
self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/8C/1T", socket=socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/8C/1T", socket=socket) self.verify(cores is not None, "Insufficient cores for speed testing") # init l3fwd common base class parameters PerfTestBase.__init__(self, valports, socket) @@ -51,7 +51,7 @@ class TestL3fwd(TestCase, PerfTestBase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_rfc2544_ipv4_lpm(self): diff --git a/tests/TestSuite_l3fwd_func.py b/tests/TestSuite_l3fwd_func.py index 1662d091..b3a8981c 100644 --- a/tests/TestSuite_l3fwd_func.py +++ b/tests/TestSuite_l3fwd_func.py @@ -2,7 +2,7 @@ # Copyright(c) 2022 Intel Corporation # -from framework.packet import Packet +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -16,18 +16,18 @@ class TestL3fwdFunc(TestCase): Run at the start of each test suite. 
L3fwd Prerequisites """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.app_path = self.dut.build_dpdk_apps("examples/l3fwd") - self.pkt = Packet() - self.dport_info0 = self.dut.ports_info[self.dut_ports[0]] - tport = self.tester.get_local_port(self.dut_ports[0]) - self.tport_info0 = self.tester.ports_info[tport] + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.app_path = self.sut_node.build_dpdk_apps("examples/l3fwd") + self.scapy_pkt_builder = ScapyPacketBuilder() + self.dport_info0 = self.sut_node.ports_info[self.sut_ports[0]] + tport = self.tg_node.get_local_port(self.sut_ports[0]) + self.tport_info0 = self.tg_node.ports_info[tport] self.tport_intf0 = self.tport_info0["intf"] # judgment is added to avoid errors caused by the absence of port 1 - if len(self.dut_ports) >= 2: - self.dport_info1 = self.dut.ports_info[self.dut_ports[1]] - self.tport_info1 = self.tester.ports_info[self.dut_ports[1]] + if len(self.sut_ports) >= 2: + self.dport_info1 = self.sut_node.ports_info[self.sut_ports[1]] + self.tport_info1 = self.tg_node.ports_info[self.sut_ports[1]] self.tport_intf1 = self.tport_info1["intf"] self.ip_src = "1.2.3.4" self.ip_dst = "198.168.0.%s" @@ -48,7 +48,7 @@ class TestL3fwdFunc(TestCase): :return: echo information after launch l3fwd """ expected = "Link up" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "%s %s -- %s" % (self.app_path, eal_params, params), expected, timeout=30 ) return out @@ -104,8 +104,8 @@ class TestL3fwdFunc(TestCase): :param type: the type of packet, ipv4 or ipv6 :return: """ - out = self.tester.send_expect( - "tcpdump -n -r /tmp/tester/sniff_%s.pcap" % (self.tport_intf0), + out = self.tg_node.send_expect( + "tcpdump -n -r /tmp/tg/sniff_%s.pcap" % (self.tport_intf0), "# ", timeout=30, ) @@ -133,8 +133,8 @@ class TestL3fwdFunc(TestCase): """ 1 port 1 queue with default setting 
""" - eal_params = self.dut.create_eal_parameters( - cores=[1], ports=self.dut_ports[0:1] + eal_params = self.sut_node.create_eal_parameters( + cores=[1], ports=self.sut_ports[0:1] ) params = '-p 0x1 --config="(0,0,1)" --eth-dest=0,b4:96:91:9f:64:b9' out = self.launch_l3fwd(eal_params, params) @@ -151,11 +151,11 @@ class TestL3fwdFunc(TestCase): src = self.tport_info0["mac"] for type in packets.keys(): pkts = self.build_packet(packets[type], type, 10, match_dst, src) - inst = self.tester.tcpdump_sniff_packets(self.tport_intf0) - self.pkt.pktgen.pkts = [] - self.pkt.update_pkt(pkts) - self.pkt.send_pkt(self.tester, self.tport_intf0) - self.tester.load_tcpdump_sniff_packets(inst) + inst = self.tg_node.tcpdump_sniff_packets(self.tport_intf0) + self.scapy_pkt_builder.scapy_pkt_util.pkts = [] + self.scapy_pkt_builder.update_pkt(pkts) + self.scapy_pkt_builder.send_pkt(self.tg_node, self.tport_intf0) + self.tg_node.load_tcpdump_sniff_packets(inst) self.check_package_received(len(pkts), type) def test_1_port_4_queues_non_default(self): @@ -163,9 +163,9 @@ class TestL3fwdFunc(TestCase): 1 port 4 queue with non-default setting """ # if port number > 1, skip this case - self.skip_case(len(self.dut_ports) <= 1, "Only support 1 port") - eal_params = self.dut.create_eal_parameters( - cores=[1, 2], ports=self.dut_ports[0:2] + self.skip_case(len(self.sut_ports) <= 1, "Only support 1 port") + eal_params = self.sut_node.create_eal_parameters( + cores=[1, 2], ports=self.sut_ports[0:2] ) params = ( '-p 0x1 --config="(0,0,1),(0,1,1),(0,2,2),(0,3,2)" -P ' @@ -185,11 +185,11 @@ class TestL3fwdFunc(TestCase): src = self.tport_info0["mac"] for type in packets.keys(): pkts = self.build_packet(packets[type], type, 20, unmatch_dst, src) - inst = self.tester.tcpdump_sniff_packets(self.tport_intf0) - self.pkt.pktgen.pkts = [] - self.pkt.update_pkt(pkts) - self.pkt.send_pkt(self.tester, self.tport_intf0) - self.tester.load_tcpdump_sniff_packets(inst) + inst = 
self.tg_node.tcpdump_sniff_packets(self.tport_intf0) + self.scapy_pkt_builder.scapy_pkt_util.pkts = [] + self.scapy_pkt_builder.update_pkt(pkts) + self.scapy_pkt_builder.send_pkt(self.tg_node, self.tport_intf0) + self.tg_node.load_tcpdump_sniff_packets(inst) self.check_package_received(len(pkts), type) def test_2_ports_4_queues_non_default(self): @@ -197,9 +197,9 @@ class TestL3fwdFunc(TestCase): 2 ports 4 queues with non-default setting """ # if port number < 2, skip this case - self.skip_case(len(self.dut_ports) >= 2, "At least 2 ports are required") - eal_params = self.dut.create_eal_parameters( - cores=[1, 2], ports=self.dut_ports[0:2] + self.skip_case(len(self.sut_ports) >= 2, "At least 2 ports are required") + eal_params = self.sut_node.create_eal_parameters( + cores=[1, 2], ports=self.sut_ports[0:2] ) params = ( '-p 0x3 --config="(0,0,1),(0,1,1),(0,2,2),(0,3,2),(1,0,1),(1,1,1),(1,2,2),(1,3,2)" -P ' @@ -224,19 +224,19 @@ class TestL3fwdFunc(TestCase): for type in packets.keys(): # port 0 pkts0 = self.build_packet(packets[type], type, 20, unmatch_dst, src0) - inst0 = self.tester.tcpdump_sniff_packets(self.tport_intf0) - self.pkt.pktgen.pkts = [] - self.pkt.update_pkt(pkts0) - self.pkt.send_pkt(self.tester, self.tport_intf0) - self.tester.load_tcpdump_sniff_packets(inst0) + inst0 = self.tg_node.tcpdump_sniff_packets(self.tport_intf0) + self.scapy_pkt_builder.scapy_pkt_util.pkts = [] + self.scapy_pkt_builder.update_pkt(pkts0) + self.scapy_pkt_builder.send_pkt(self.tg_node, self.tport_intf0) + self.tg_node.load_tcpdump_sniff_packets(inst0) self.check_package_received(len(pkts0), type) # port 1 pkts1 = self.build_packet(packets[type], type, 20, unmatch_dst, src1) - inst1 = self.tester.tcpdump_sniff_packets(self.tport_intf1) - self.pkt.update_pkt(pkts1) - self.pkt.send_pkt(self.tester, self.tport_intf1) - self.tester.load_tcpdump_sniff_packets(inst1) + inst1 = self.tg_node.tcpdump_sniff_packets(self.tport_intf1) + self.scapy_pkt_builder.update_pkt(pkts1) + 
self.scapy_pkt_builder.send_pkt(self.tg_node, self.tport_intf1) + self.tg_node.load_tcpdump_sniff_packets(inst1) self.check_package_received(len(pkts1), type) def tear_down(self): @@ -244,8 +244,8 @@ class TestL3fwdFunc(TestCase): run after each test case. """ # close l3fwd - self.dut.send_expect("^C", "# ") - self.dut.kill_all() + self.sut_node.send_expect("^C", "# ") + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_l3fwd_lpm_ipv4.py b/tests/TestSuite_l3fwd_lpm_ipv4.py index 1f9c1b8a..a4324bc4 100644 --- a/tests/TestSuite_l3fwd_lpm_ipv4.py +++ b/tests/TestSuite_l3fwd_lpm_ipv4.py @@ -22,13 +22,13 @@ class TestL3fwdLpmIpv4(TestCase, PerfTestBase): L3fwd Prerequisites """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/8C/1T", socket=socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/8C/1T", socket=socket) self.verify(cores is not None, "Insufficient cores for speed testing") # init l3fwd common base class parameters PerfTestBase.__init__(self, valports, socket) @@ -51,7 +51,7 @@ class TestL3fwdLpmIpv4(TestCase, PerfTestBase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_throughput_ipv4_lpm(self): diff --git a/tests/TestSuite_l3fwd_lpm_ipv4_rfc2544.py b/tests/TestSuite_l3fwd_lpm_ipv4_rfc2544.py index b9c8689c..290b9e0a 100644 --- a/tests/TestSuite_l3fwd_lpm_ipv4_rfc2544.py +++ b/tests/TestSuite_l3fwd_lpm_ipv4_rfc2544.py @@ -22,13 +22,13 @@ class TestL3fwdLpmIpv4Rfc2544(TestCase, PerfTestBase): L3fwd Prerequisites """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/8C/1T", socket=socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/8C/1T", socket=socket) self.verify(cores is not None, "Insufficient cores for speed testing") # init l3fwd common base class parameters PerfTestBase.__init__(self, valports, socket) @@ -51,7 +51,7 @@ class TestL3fwdLpmIpv4Rfc2544(TestCase, PerfTestBase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_rfc2544_ipv4_lpm(self): diff --git a/tests/TestSuite_l3fwd_lpm_ipv6.py b/tests/TestSuite_l3fwd_lpm_ipv6.py index 5e7d82ed..d06579bd 100644 --- a/tests/TestSuite_l3fwd_lpm_ipv6.py +++ b/tests/TestSuite_l3fwd_lpm_ipv6.py @@ -21,13 +21,13 @@ class TestL3fwdLpmIpv6(TestCase, PerfTestBase): L3fwd Prerequisites """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/8C/1T", socket=socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/8C/1T", socket=socket) self.verify(cores is not None, "Insufficient cores for speed testing") # init l3fwd common base class parameters PerfTestBase.__init__(self, valports, socket) @@ -50,7 +50,7 @@ class TestL3fwdLpmIpv6(TestCase, PerfTestBase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_throughput_ipv6_lpm(self): diff --git a/tests/TestSuite_l3fwdacl.py b/tests/TestSuite_l3fwdacl.py index 0876b978..8b1a353f 100644 --- a/tests/TestSuite_l3fwdacl.py +++ b/tests/TestSuite_l3fwdacl.py @@ -10,7 +10,7 @@ Layer-3 forwarding ACL test script. 
import re import time -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils from framework.test_case import TestCase @@ -360,42 +360,42 @@ class TestL3fwdacl(TestCase): self.app_l3fwd_acl_path, self.eal_para, self.port_mask, - self.dut_ports[0], - self.dut_ports[1], + self.sut_ports[0], + self.sut_ports[1], TestL3fwdacl.acl_ipv4_db, TestL3fwdacl.acl_ipv6_db, extra_args, ) ) - out = self.dut.send_expect(cmdline, "L3FWD:", 30) + out = self.sut_node.send_expect(cmdline, "L3FWD:", 30) def get_core_list(self): - self.sock0ports = self.dut.get_ports(self.nic, socket=0) - self.sock1ports = self.dut.get_ports(self.nic, socket=1) + self.sock0ports = self.sut_node.get_ports(self.nic, socket=0) + self.sock1ports = self.sut_node.get_ports(self.nic, socket=1) if len(self.sock0ports) > 0 and len(self.sock1ports) > 0: - return self.dut.get_core_list("2S/4C/2T") + return self.sut_node.get_core_list("2S/4C/2T") else: - return self.dut.get_core_list("1S/4C/1T") + return self.sut_node.get_core_list("1S/4C/1T") def rule_cfg_init(self, acl_ipv4_db, acl_ipv6_db): """ initialize the acl rule file """ if acl_ipv4_db: - self.dut.send_expect("echo '' > %s" % acl_ipv4_db, "# ") - self.dut.send_expect( + self.sut_node.send_expect("echo '' > %s" % acl_ipv4_db, "# ") + self.sut_node.send_expect( "echo 'R0.0.0.0/0 0.0.0.0/0 0 : 65535 0 : 65535 0x00/0x00 %s' >> %s" - % (self.dut_ports[1], acl_ipv4_db), + % (self.sut_ports[1], acl_ipv4_db), "# ", ) if acl_ipv6_db: - self.dut.send_expect("echo '' > %s" % acl_ipv6_db, "# ") - self.dut.send_expect( + self.sut_node.send_expect("echo '' > %s" % acl_ipv6_db, "# ") + self.sut_node.send_expect( "echo 'R0:0:0:0:0:0:0:0/0 0:0:0:0:0:0:0:0/0 0 : 65535 0 : 65535 0x00/0x00 %s' >> %s" - % (self.dut_ports[1], acl_ipv6_db), + % (self.sut_ports[1], acl_ipv6_db), "# ", ) @@ -403,8 +403,8 @@ class TestL3fwdacl(TestCase): """ delete the acle rule file """ - self.dut.send_expect("rm -rf %s" % 
acl_ipv4_db, "# ") - self.dut.send_expect("rm -rf %s" % acl_ipv6_db, "# ") + self.sut_node.send_expect("rm -rf %s" % acl_ipv4_db, "# ") + self.sut_node.send_expect("rm -rf %s" % acl_ipv6_db, "# ") def create_ipv4_ip_not_match(self, ip_address): """ @@ -445,8 +445,8 @@ class TestL3fwdacl(TestCase): """ send a packet not match rule and return whether forwarded """ - tx_interface = self.tester.get_interface(tx_port) - rx_interface = self.tester.get_interface(rx_port) + tx_interface = self.tg_node.get_interface(tx_port) + rx_interface = self.tg_node.get_interface(rx_port) if rule["sIpAddr"] != "ALL": rule["sIpAddr"] = self.create_ipv4_ip_not_match(rule["sIpAddr"]) if rule["dIpAddr"] != "ALL": @@ -465,10 +465,10 @@ class TestL3fwdacl(TestCase): dst_filter = {"layer": "ether", "config": {"dst": "not ff:ff:ff:ff:ff:ff"}} filters = [dst_filter] - inst = self.tester.tcpdump_sniff_packets(rx_interface, filters=filters) - pkt = packet.Packet() - pkt.append_pkt(ethernet_str) - pkt.send_pkt(crb=self.tester, tx_port=tx_interface, timeout=30) + inst = self.tg_node.tcpdump_sniff_packets(rx_interface, filters=filters) + pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() + pkt_builder.append_pkt(ethernet_str) + pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_interface, timeout=30) out = self.remove_dhcp_from_revpackets(inst) return len(out) @@ -476,8 +476,8 @@ class TestL3fwdacl(TestCase): """ send a packet not match rule and return whether forwardeid """ - tx_interface = self.tester.get_interface(tx_port) - rx_interface = self.tester.get_interface(rx_port) + tx_interface = self.tg_node.get_interface(tx_port) + rx_interface = self.tg_node.get_interface(rx_port) if rule["sIpAddr"] != "ALL": rule["sIpAddr"] = self.create_ipv6_ip_not_match(rule["sIpAddr"]) if rule["dIpAddr"] != "ALL": @@ -495,10 +495,10 @@ class TestL3fwdacl(TestCase): ethernet_str = self.create_ipv6_rule_string(rule, "Ether") fil = [{"layer": "ether", "config": {"dst": "not ff:ff:ff:ff:ff:ff"}}] 
- inst = self.tester.tcpdump_sniff_packets(rx_interface, filters=fil) - pkt = packet.Packet() - pkt.append_pkt(ethernet_str) - pkt.send_pkt(crb=self.tester, tx_port=tx_interface, timeout=30) + inst = self.tg_node.tcpdump_sniff_packets(rx_interface, filters=fil) + pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() + pkt_builder.append_pkt(ethernet_str) + pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_interface, timeout=30) out = self.remove_dhcp_from_revpackets(inst) return len(out) @@ -507,16 +507,16 @@ class TestL3fwdacl(TestCase): """ send a packet match rule and return whether forwarded """ - tx_interface = self.tester.get_interface(tx_port) - rx_interface = self.tester.get_interface(rx_port) + tx_interface = self.tg_node.get_interface(tx_port) + rx_interface = self.tg_node.get_interface(rx_port) etherStr = self.create_ipv4_rule_string(rule, "Ether") dst_filter = {"layer": "ether", "config": {"dst": "not ff:ff:ff:ff:ff:ff"}} filters = [dst_filter] - inst = self.tester.tcpdump_sniff_packets(rx_interface, filters=filters) - pkt = packet.Packet() - pkt.append_pkt(etherStr) - pkt.send_pkt(crb=self.tester, tx_port=tx_interface, timeout=30) + inst = self.tg_node.tcpdump_sniff_packets(rx_interface, filters=filters) + pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() + pkt_builder.append_pkt(etherStr) + pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_interface, timeout=30) out = self.remove_dhcp_from_revpackets(inst) return len(out) @@ -524,22 +524,22 @@ class TestL3fwdacl(TestCase): """ send a packet match rule and return whether forwarded """ - tx_interface = self.tester.get_interface(tx_port) - rx_interface = self.tester.get_interface(rx_port) + tx_interface = self.tg_node.get_interface(tx_port) + rx_interface = self.tg_node.get_interface(rx_port) etherStr = self.create_ipv6_rule_string(rule, "Ether") fil = [{"layer": "ether", "config": {"dst": "not ff:ff:ff:ff:ff:ff"}}] - inst = 
self.tester.tcpdump_sniff_packets(rx_interface, filters=fil) - pkt = packet.Packet() - pkt.append_pkt(etherStr) - pkt.send_pkt(crb=self.tester, tx_port=tx_interface, timeout=30) + inst = self.tg_node.tcpdump_sniff_packets(rx_interface, filters=fil) + pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() + pkt_builder.append_pkt(etherStr) + pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_interface, timeout=30) out = self.remove_dhcp_from_revpackets(inst) return len(out) def remove_dhcp_from_revpackets(self, inst): - p = self.tester.load_tcpdump_sniff_packets(inst, timeout=5) - pkts = p.pktgen.pkts + scapy_pkt_builder = self.tg_node.load_tcpdump_sniff_packets(inst, timeout=5) + pkts = scapy_pkt_builder.scapy_pkt_util.pkts i = 0 while len(pkts) != 0 and i <= len(pkts) - 1: if pkts[i].haslayer("DHCP"): @@ -611,7 +611,7 @@ class TestL3fwdacl(TestCase): port = rule["Port"] - destination_mac = self.dut.get_mac_address(self.dut_ports[0]) + destination_mac = self.sut_node.get_mac_address(self.sut_ports[0]) rule_str = TestL3fwdacl.rule_format % ( acl_promt, @@ -700,7 +700,7 @@ class TestL3fwdacl(TestCase): port = rule["Port"] - destination_mac = self.dut.get_mac_address(self.dut_ports[0]) + destination_mac = self.sut_node.get_mac_address(self.sut_ports[0]) rule_str = TestL3fwdacl.rule_format % ( acl_promt, @@ -731,10 +731,10 @@ class TestL3fwdacl(TestCase): create rule.db from rule_list """ - self.dut.send_expect("echo '' > %s" % TestL3fwdacl.acl_ipv4_db, "# ") + self.sut_node.send_expect("echo '' > %s" % TestL3fwdacl.acl_ipv4_db, "# ") for rule in rule_list: rule_str = self.create_ipv4_rule_string(rule, rule_type="DataBase") - self.dut.send_expect( + self.sut_node.send_expect( "echo %s >> %s" % (rule_str, TestL3fwdacl.acl_ipv4_db), "# " ) @@ -745,10 +745,10 @@ class TestL3fwdacl(TestCase): create rule.db from rule_list """ - self.dut.send_expect("echo '' > %s" % 
TestL3fwdacl.acl_ipv6_db, "# ") for rule in rule_list: rule_str = self.create_ipv6_rule_string(rule, rule_type="DataBase") - self.dut.send_expect( + self.sut_node.send_expect( "echo %s >> %s" % (rule_str, TestL3fwdacl.acl_ipv6_db), "# " ) @@ -765,13 +765,13 @@ class TestL3fwdacl(TestCase): self.start_l3fwdacl() - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) out1 = self.send_ipv4_packet_match(acl_rule, tx_port, rx_port) out2 = self.send_ipv4_packet_not_match(acl_rule, tx_port, rx_port) - self.dut.send_expect("^C", "#", 20) + self.sut_node.send_expect("^C", "#", 20) self.verify(out1 <= 0, "Rx port receive unexpected packet") self.verify(out2 >= 1, "Rx port not receive expected packet") @@ -786,13 +786,13 @@ class TestL3fwdacl(TestCase): self.start_l3fwdacl() - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) out1 = self.send_ipv6_packet_match(acl_rule, tx_port, rx_port) out2 = self.send_ipv6_packet_not_match(acl_rule, tx_port, rx_port) - self.dut.send_expect("^C", "#", 20) + self.sut_node.send_expect("^C", "#", 20) self.verify(out1 <= 0, "Rx port receive unexpected packet") self.verify(out2 >= 1, "Rx port not receive expected packet") @@ -812,16 +812,16 @@ class TestL3fwdacl(TestCase): self.app_l3fwd_acl_path, self.eal_para, self.port_mask, - self.dut_ports[0], - self.dut_ports[1], + self.sut_ports[0], + self.sut_ports[1], TestL3fwdacl.acl_ipv4_db, TestL3fwdacl.acl_ipv6_db, ) ) - out = self.dut.send_expect(cmdline, "# ", 30) + out = self.sut_node.send_expect(cmdline, "# ", 30) self.verify("rules error" in out, "l3fwd not detect invalid rule") - self.dut.send_expect("^C", "#", 5) + 
self.sut_node.send_expect("^C", "#", 5) def invalid_acl_ipv6_test(self, acl_rule): """ @@ -839,16 +839,16 @@ class TestL3fwdacl(TestCase): self.app_l3fwd_acl_path, self.eal_para, self.port_mask, - self.dut_ports[0], - self.dut_ports[1], + self.sut_ports[0], + self.sut_ports[1], TestL3fwdacl.acl_ipv4_db, TestL3fwdacl.acl_ipv6_db, ) ) - out = self.dut.send_expect(cmdline, "# ", 30) + out = self.sut_node.send_expect(cmdline, "# ", 30) self.verify("rules error" in out, "l3fwd not detect invalid rule") - self.dut.send_expect("^C", "#", 5) + self.sut_node.send_expect("^C", "#", 5) def set_up_all(self): """ @@ -857,41 +857,41 @@ class TestL3fwdacl(TestCase): l3fwd Acl Prerequisites """ - # Based on h/w type, choose how many dut_ports to use - ports = self.dut.get_ports(self.nic) + # Based on h/w type, choose how many sut_ports to use + ports = self.sut_node.get_ports(self.nic) - # Verify that enough dut_ports are available - self.verify(len(ports) >= 2, "Insufficient dut_ports for speed testing") + # Verify that enough sut_ports are available + self.verify(len(ports) >= 2, "Insufficient sut_ports for speed testing") # Verify that enough threads are available cores = self.get_core_list() self.verify(cores is not None, "Insufficient cores for speed testing") - self.eal_para = self.dut.create_eal_parameters( + self.eal_para = self.sut_node.create_eal_parameters( cores=self.get_core_list(), other_eal_param="force-max-simd-bitwidth" ) self.core_mask = utils.create_mask(cores) print("Core mask: %s" % self.core_mask) - if self.dut.dpdk_version >= "20.11.0": + if self.sut_node.dpdk_version >= "20.11.0": self.eal_para += " --force-max-simd-bitwidth=0" - valid_ports = [port for port in ports if self.tester.get_local_port(port) != -1] + valid_ports = [port for port in ports if self.tg_node.get_local_port(port) != -1] self.verify( - len(valid_ports) >= 2, "Insufficient active dut_ports for speed testing" + len(valid_ports) >= 2, "Insufficient active sut_ports for speed testing" ) - 
self.dut_ports = valid_ports - print("Valid ports found in DUT: %s" % self.dut_ports) + self.sut_ports = valid_ports + print("Valid ports found in SUT: %s" % self.sut_ports) - self.port_mask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) + self.port_mask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) print("Port mask: %s" % self.port_mask) - TestL3fwdacl.default_rule["Port"] = self.dut_ports[1] + TestL3fwdacl.default_rule["Port"] = self.sut_ports[1] # compile l3fwd-acl - out = self.dut.build_dpdk_apps("examples/l3fwd-acl") - self.app_l3fwd_acl_path = self.dut.apps_name["l3fwd-acl"] + out = self.sut_node.build_dpdk_apps("examples/l3fwd-acl") + self.app_l3fwd_acl_path = self.sut_node.apps_name["l3fwd-acl"] self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -919,8 +919,8 @@ class TestL3fwdacl(TestCase): rule_list_ipv4 = [] - TestL3fwdacl.exact_rule_list_ipv4[0]["Port"] = self.dut_ports[0] - TestL3fwdacl.exact_rule_list_ipv4[1]["Port"] = self.dut_ports[1] + TestL3fwdacl.exact_rule_list_ipv4[0]["Port"] = self.sut_ports[0] + TestL3fwdacl.exact_rule_list_ipv4[1]["Port"] = self.sut_ports[1] rule_list_ipv4.append(TestL3fwdacl.exact_rule_list_ipv4[0]) rule_list_ipv4.append(TestL3fwdacl.exact_rule_list_ipv4[1]) @@ -928,8 +928,8 @@ class TestL3fwdacl(TestCase): self.start_l3fwdacl() - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) out1 = self.send_ipv4_packet_match( TestL3fwdacl.exact_rule_list_ipv4[0], tx_port, tx_port @@ -938,15 +938,15 @@ class TestL3fwdacl(TestCase): TestL3fwdacl.exact_rule_list_ipv4[1], tx_port, rx_port ) - self.dut.send_expect("^C", "#", 20) + self.sut_node.send_expect("^C", "#", 20) self.verify(out1 >= 1, "Rx port0 not receive expected packet") self.verify(out2 >= 1, "Rx port1 
not receive expected packet") rule_list_ipv6 = [] - TestL3fwdacl.exact_rule_list_ipv6[0]["Port"] = self.dut_ports[0] - TestL3fwdacl.exact_rule_list_ipv6[1]["Port"] = self.dut_ports[1] + TestL3fwdacl.exact_rule_list_ipv6[0]["Port"] = self.sut_ports[0] + TestL3fwdacl.exact_rule_list_ipv6[1]["Port"] = self.sut_ports[1] rule_list_ipv6.append(TestL3fwdacl.exact_rule_list_ipv6[0]) rule_list_ipv6.append(TestL3fwdacl.exact_rule_list_ipv6[1]) @@ -954,8 +954,8 @@ class TestL3fwdacl(TestCase): self.start_l3fwdacl() - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) out1 = self.send_ipv6_packet_match( TestL3fwdacl.exact_rule_list_ipv6[0], tx_port, tx_port @@ -964,7 +964,7 @@ class TestL3fwdacl(TestCase): TestL3fwdacl.exact_rule_list_ipv6[1], tx_port, rx_port ) - self.dut.send_expect("^C", "#", 20) + self.sut_node.send_expect("^C", "#", 20) self.verify(out1 >= 1, "Rx port0 not receive expected packet") self.verify(out2 >= 1, "Rx port1 not receive expected packet") @@ -980,8 +980,8 @@ class TestL3fwdacl(TestCase): rule_list_ipv4 = [] - TestL3fwdacl.lpm_rule_list_ipv4[0]["Port"] = self.dut_ports[0] - TestL3fwdacl.lpm_rule_list_ipv4[1]["Port"] = self.dut_ports[1] + TestL3fwdacl.lpm_rule_list_ipv4[0]["Port"] = self.sut_ports[0] + TestL3fwdacl.lpm_rule_list_ipv4[1]["Port"] = self.sut_ports[1] rule_list_ipv4.append(TestL3fwdacl.lpm_rule_list_ipv4[0]) rule_list_ipv4.append(TestL3fwdacl.lpm_rule_list_ipv4[1]) @@ -989,8 +989,8 @@ class TestL3fwdacl(TestCase): self.start_l3fwdacl() - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) out1 = self.send_ipv4_packet_match( TestL3fwdacl.lpm_rule_list_ipv4[0], tx_port, tx_port @@ 
-999,15 +999,15 @@ class TestL3fwdacl(TestCase): TestL3fwdacl.lpm_rule_list_ipv4[1], tx_port, rx_port ) - self.dut.send_expect("^C", "#", 20) + self.sut_node.send_expect("^C", "#", 20) self.verify(out1 >= 1, "Rx port0 not receive expected packet") self.verify(out2 >= 1, "Rx port1 not receive expected packet") rule_list_ipv6 = [] - TestL3fwdacl.lpm_rule_list_ipv6[0]["Port"] = self.dut_ports[0] - TestL3fwdacl.lpm_rule_list_ipv6[1]["Port"] = self.dut_ports[1] + TestL3fwdacl.lpm_rule_list_ipv6[0]["Port"] = self.sut_ports[0] + TestL3fwdacl.lpm_rule_list_ipv6[1]["Port"] = self.sut_ports[1] rule_list_ipv6.append(TestL3fwdacl.lpm_rule_list_ipv6[0]) rule_list_ipv6.append(TestL3fwdacl.lpm_rule_list_ipv6[1]) @@ -1015,8 +1015,8 @@ class TestL3fwdacl(TestCase): self.start_l3fwdacl() - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) out1 = self.send_ipv6_packet_match( TestL3fwdacl.lpm_rule_list_ipv6[0], tx_port, tx_port @@ -1025,7 +1025,7 @@ class TestL3fwdacl(TestCase): TestL3fwdacl.lpm_rule_list_ipv6[1], tx_port, rx_port ) - self.dut.send_expect("^C", "#", 20) + self.sut_node.send_expect("^C", "#", 20) self.verify(out1 >= 1, "Rx port0 not receive expected packet") self.verify(out2 >= 1, "Rx port1 not receive expected packet") @@ -1046,8 +1046,8 @@ class TestL3fwdacl(TestCase): self.start_l3fwdacl(scalar=True) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) out1 = self.send_ipv4_packet_match( TestL3fwdacl.scalar_rule_list_ipv4[0], tx_port, rx_port @@ -1056,7 +1056,7 @@ class TestL3fwdacl(TestCase): TestL3fwdacl.scalar_rule_list_ipv4[0], tx_port, rx_port ) - self.dut.send_expect("^C", "#", 20) + 
self.sut_node.send_expect("^C", "#", 20) self.verify(out1 <= 0, "Rx port received unexpected packet") self.verify(out2 >= 1, "Rx port not receive expected packet") @@ -1068,8 +1068,8 @@ class TestL3fwdacl(TestCase): self.start_l3fwdacl(scalar=True) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) out1 = self.send_ipv6_packet_match( TestL3fwdacl.scalar_rule_list_ipv6[0], tx_port, rx_port @@ -1078,7 +1078,7 @@ class TestL3fwdacl(TestCase): TestL3fwdacl.scalar_rule_list_ipv6[0], tx_port, rx_port ) - self.dut.send_expect("^C", "#", 20) + self.sut_node.send_expect("^C", "#", 20) self.verify(out1 <= 0, "Rx port received unexpected packet") self.verify(out2 >= 1, "Rx port not receive expected packet") @@ -1108,14 +1108,14 @@ class TestL3fwdacl(TestCase): self.app_l3fwd_acl_path, self.eal_para, self.port_mask, - self.dut_ports[0], - self.dut_ports[1], + self.sut_ports[0], + self.sut_ports[1], TestL3fwdacl.acl_ipv4_db, TestL3fwdacl.acl_ipv6_db, ) ) - out = self.dut.send_expect(cmdline, "# ", 30) + out = self.sut_node.send_expect(cmdline, "# ", 30) self.verify("fwd number illegal" in out, "l3fwd not detect invalid port") rule_list_ipv6 = [] @@ -1128,14 +1128,14 @@ class TestL3fwdacl(TestCase): self.app_l3fwd_acl_path, self.eal_para, self.port_mask, - self.dut_ports[0], - self.dut_ports[1], + self.sut_ports[0], + self.sut_ports[1], TestL3fwdacl.acl_ipv4_db, TestL3fwdacl.acl_ipv6_db, ) ) - out = self.dut.send_expect(cmdline, "# ", 30) + out = self.sut_node.send_expect(cmdline, "# ", 30) self.verify("fwd number illegal" in out, "l3fwd not detect invalid port") self.rule_cfg_delete(TestL3fwdacl.acl_ipv4_db, TestL3fwdacl.acl_ipv6_db) diff --git a/tests/TestSuite_large_vf.py b/tests/TestSuite_large_vf.py index 3dd1da74..4df36d17 100644 --- a/tests/TestSuite_large_vf.py +++ 
b/tests/TestSuite_large_vf.py @@ -5,8 +5,8 @@ import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.utils import GREEN, RED @@ -187,42 +187,42 @@ class TestLargeVf(TestCase): Run at the start of each test suite. """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) self.verify( self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"], "%s nic not support large vf" % self.nic, ) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.vf_mac = "00:11:22:33:44:55" - self.tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - self.tester_iface0 = self.tester.get_interface(self.tester_port0) - self.used_dut_port = self.dut_ports[0] - self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf0_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] + self.tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_iface0 = self.tg_node.get_interface(self.tg_port0) + self.used_sut_port = self.sut_ports[0] + self.pf0_intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf0_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] self.max_vf_num = int( - self.dut.send_expect( + self.sut_node.send_expect( "cat /sys/bus/pci/devices/%s/sriov_totalvfs" % self.pf0_pci, "#" ) ) - self.pf0_mac = self.dut.get_mac_address(0) + self.pf0_mac = self.sut_node.get_mac_address(0) self.vf_flag = False # set vf driver self.vf_driver = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") - self.pkt = Packet() 
- self.pmd_output = PmdOutput(self.dut) + self.scapy_pkt_builder = ScapyPacketBuilder() + self.pmd_output = PmdOutput(self.sut_node) - self.app_path = self.dut.apps_name["test-pmd"] + self.app_path = self.sut_node.apps_name["test-pmd"] self.vf_num = 7 if self.max_vf_num > 128 else 3 self.pmdout_list = [] self.session_list = [] for i in range(self.vf_num): - session = self.dut.new_session() + session = self.sut_node.new_session() self.session_list.append(session) - pmdout = PmdOutput(self.dut, session) + pmdout = PmdOutput(self.sut_node, session) self.pmdout_list.append(pmdout) def set_up(self): @@ -233,17 +233,17 @@ class TestLargeVf(TestCase): def create_iavf(self, vf_numbers): # Generate 3 VFs on each PF and set mac address for VF0 - self.dut.bind_interfaces_linux("ice") - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, vf_numbers) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.bind_interfaces_linux("ice") + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, vf_numbers) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] self.vf_flag = True try: for port in self.sriov_vfs_port: port.bind_driver(self.drivername) self.vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} - self.dut.send_expect("ifconfig %s up" % self.pf0_intf, "# ") - self.dut.send_expect( + self.sut_node.send_expect("ifconfig %s up" % self.pf0_intf, "# ") + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf0_intf, self.vf_mac), "# " ) except Exception as e: @@ -252,7 +252,7 @@ class TestLargeVf(TestCase): def destroy_iavf(self): if self.vf_flag is True: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) self.vf_flag = False def launch_testpmd(self, param, total=False, retry_times=3): @@ -279,13 +279,13 @@ class TestLargeVf(TestCase): self.pmd_output.execute_cmd("start") def send_packets(self, packets, count): - 
self.pkt.update_pkt(packets) - self.pkt.send_pkt(crb=self.tester, tx_port=self.tester_iface0, count=count) + self.scapy_pkt_builder.update_pkt(packets) + self.scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=self.tg_iface0, count=count) def send_pkts_getouput(self, pkts, count): self.send_packets(pkts, count) time.sleep(1) - out_info = self.dut.get_session_output(timeout=1) + out_info = self.sut_node.get_session_output(timeout=1) out_pkt = self.pmd_output.execute_cmd("stop") time.sleep(1) out = out_info + out_pkt @@ -336,7 +336,7 @@ class TestLargeVf(TestCase): self.destroy_pf_rule(self.pmdout_list[0], self.pf0_intf) elif subcase_name == "test_exceed_256_queues": self.pmd_output.execute_cmd("quit", "#") - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( prefix="port0vf0", ports=[self.sriov_vfs_port[0].pci] ) cmd = ( @@ -361,7 +361,7 @@ class TestLargeVf(TestCase): ports=[self.sriov_vfs_port[0].pci], prefix="port0vf0", ) - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( fixed_prefix=True, ports=[self.sriov_vfs_port[1].pci] ) cmd = ( @@ -386,7 +386,7 @@ class TestLargeVf(TestCase): ) else: # start fourth testpmd failed - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( fixed_prefix=True, ports=[self.sriov_vfs_port[-1].pci], ) @@ -417,14 +417,14 @@ class TestLargeVf(TestCase): self.check_match_mismatch_pkts(tv) elif subcase_name == "test_more_than_max_vfs_4_queues": self.pmd_output.execute_cmd("quit", "#") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "echo {} > /sys/bus/pci/devices/{}/sriov_numvfs".format( self.max_vf_num, self.pf0_pci ), "# ", ) self.verify(tv["check_param"] not in out, "fail: create vfs failed") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "echo {} > /sys/bus/pci/devices/{}/sriov_numvfs".format( self.max_vf_num + 1, self.pf0_pci ), @@ -651,26 +651,26 @@ class 
TestLargeVf(TestCase): self.rte_flow_process(max_vfs_256_queues_3) def test_max_vfs_4_queues(self): - session_last = self.dut.new_session() - pmdout = PmdOutput(self.dut, session_last) + session_last = self.sut_node.new_session() + pmdout = PmdOutput(self.sut_node, session_last) self.pmdout_list.append(pmdout) self.create_iavf(self.max_vf_num) self.launch_testpmd("--rxq=4 --txq=4") self.config_testpmd() self.rte_flow_process(max_vfs_4_queues) - self.dut.close_session(session_last) + self.sut_node.close_session(session_last) def tear_down(self): """ Run after each test case. """ self.pmd_output.execute_cmd("quit", "#") - self.dut.kill_all() + self.sut_node.kill_all() self.destroy_iavf() def tear_down_all(self): """ Run after each test suite. """ - self.dut.close_session(self.session_list) - self.dut.kill_all() + self.sut_node.close_session(self.session_list) + self.sut_node.kill_all() diff --git a/tests/TestSuite_link_flowctrl.py b/tests/TestSuite_link_flowctrl.py index 4ad89fe8..51caf882 100644 --- a/tests/TestSuite_link_flowctrl.py +++ b/tests/TestSuite_link_flowctrl.py @@ -12,10 +12,10 @@ import re from time import sleep import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestLinkFlowctrl(TestCase): @@ -41,15 +41,15 @@ class TestLinkFlowctrl(TestCase): Link flow control Prerequisites """ - self.dutPorts = self.dut.get_ports() - self.verify(len(self.dutPorts) > 1, "Insuficient ports") + self.sutPorts = self.sut_node.get_ports() + self.verify(len(self.sutPorts) > 1, "Insuficient ports") - self.rx_port = self.dutPorts[0] - self.tester_tx_mac = self.tester.get_mac( - self.tester.get_local_port(self.rx_port) + self.rx_port = self.sutPorts[0] + self.tg_tx_mac = self.tg_node.get_mac( + self.tg_node.get_local_port(self.rx_port) ) - self.tx_port = 
self.dutPorts[1] + self.tx_port = self.sutPorts[1] self.portMask = utils.create_mask([self.rx_port, self.tx_port]) @@ -60,10 +60,10 @@ class TestLinkFlowctrl(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.pmdout.start_testpmd("all", "--portmask=%s" % self.portMask) def get_tgen_input(self): @@ -71,48 +71,48 @@ class TestLinkFlowctrl(TestCase): create streams for ports. """ - tester_tx_port = self.tester.get_local_port(self.rx_port) - tester_rx_port = self.tester.get_local_port(self.tx_port) + tg_tx_port = self.tg_node.get_local_port(self.rx_port) + tg_rx_port = self.tg_node.get_local_port(self.tx_port) tgenInput = [] pcap = os.sep.join([self.output_path, "test.pcap"]) - tgenInput.append((tester_tx_port, tester_rx_port, pcap)) + tgenInput.append((tg_tx_port, tg_rx_port, pcap)) return tgenInput def start_traffic(self, tgenInput): pcap = os.sep.join([self.output_path, "test.pcap"]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s",[Ether()/IP()/UDP()/("X"*%d)])' % (pcap, TestLinkFlowctrl.payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) options = {"duration": 60} - result = self.tester.pktgen.measure_loss(stream_ids=streams, options=options) + result = self.tg_node.perf_tg.measure_loss(stream_ids=streams, options=options) return result[0] def set_flow_ctrl( self, 
rx_flow_control="off", tx_flow_control="off", pause_frame_fwd="off" ): - self.dut.send_expect( + self.sut_node.send_expect( "set flow_ctrl rx %s tx %s 300 50 10 1 mac_ctrl_frame_fwd %s autoneg on %d " % (rx_flow_control, tx_flow_control, pause_frame_fwd, self.rx_port), "testpmd> ", ) - self.dut.send_expect("set fwd csum", "testpmd> ") - self.dut.send_expect("start", "testpmd> ", 60) - self.pmdout.wait_link_status_up(self.dutPorts[0]) + self.sut_node.send_expect("set fwd csum", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ", 60) + self.pmdout.wait_link_status_up(self.sutPorts[0]) def pause_frame_loss_test( self, rx_flow_control="off", tx_flow_control="off", pause_frame_fwd="off" @@ -121,7 +121,7 @@ class TestLinkFlowctrl(TestCase): tgenInput = self.get_tgen_input() self.set_flow_ctrl(rx_flow_control, tx_flow_control, pause_frame_fwd) result = self.start_traffic(tgenInput) - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") return result @@ -139,7 +139,7 @@ class TestLinkFlowctrl(TestCase): port_stats = {} for port in ports: - out = self.dut.send_expect("show port stats %d" % port, "testpmd> ") + out = self.sut_node.send_expect("show port stats %d" % port, "testpmd> ") rx_packets = int(rx.search(out).group(1)) tx_packets = int(tx.search(out).group(1)) @@ -149,26 +149,26 @@ class TestLinkFlowctrl(TestCase): return port_stats def send_packets(self, frame): - self.pmdout.wait_link_status_up(self.dutPorts[0]) - tester_tx_port = self.tester.get_local_port(self.rx_port) - tx_interface = self.tester.get_interface(tester_tx_port) - tester_rx_port = self.tester.get_local_port(self.tx_port) + self.pmdout.wait_link_status_up(self.sutPorts[0]) + tg_tx_port = self.tg_node.get_local_port(self.rx_port) + tx_interface = self.tg_node.get_interface(tg_tx_port) + tg_rx_port = self.tg_node.get_local_port(self.tx_port) tgenInput = [] - tgenInput.append((tester_tx_port, tester_rx_port, "test.pcap")) - self.tester.scapy_foreground() - 
self.tester.scapy_append( + tgenInput.append((tg_tx_port, tg_rx_port, "test.pcap")) + self.tg_node.scapy_foreground() + self.tg_node.scapy_append( 'sendp(%s, iface="%s", count=%d)' % (frame, tx_interface, TestLinkFlowctrl.frames_to_sent) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # The following sleep is needed to allow all the packets to arrive. - # 1s works for Crown Pass (FC18) DUT, Lizard Head Pass (FC14) tester + # 1s works for Crown Pass (FC18) SUT, Lizard Head Pass (FC14) TG # using 82599. Increase it in case of packet loosing. sleep(1) - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") port_stats = self.get_testpmd_port_stats((self.rx_port, self.tx_port)) return port_stats @@ -182,19 +182,19 @@ class TestLinkFlowctrl(TestCase): """ if self.nic in ["cavium_a063", "cavium_a064"]: - self.dut.send_expect( + self.sut_node.send_expect( "set flow_ctrl rx %s tx %s 300 50 10 1 autoneg %s %d " % (flow_control, flow_control, flow_control, self.rx_port), "testpmd> ", ) elif self.running_case == "test_pause_fwd_port_stop_start": - self.dut.send_expect( + self.sut_node.send_expect( "set flow_ctrl mac_ctrl_frame_fwd %s %d " % (pause_frame_fwd, self.rx_port), "testpmd> ", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "set flow_ctrl rx %s tx %s 300 50 10 1 mac_ctrl_frame_fwd %s autoneg %s %d " % ( flow_control, @@ -206,9 +206,9 @@ class TestLinkFlowctrl(TestCase): "testpmd> ", ) - self.dut.send_expect("set fwd io", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("clear port stats all", "testpmd> ") + self.sut_node.send_expect("set fwd io", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("clear port stats all", "testpmd> ") port_stats = self.send_packets(frame) return port_stats @@ -271,7 +271,7 @@ class TestLinkFlowctrl(TestCase): elif option == 2: return TestLinkFlowctrl.pause_frame % ( - self.tester_tx_mac, + 
self.tg_tx_mac, TestLinkFlowctrl.pause_frame_dst, TestLinkFlowctrl.pause_frame_type, "\\x00\\x02", @@ -280,7 +280,7 @@ class TestLinkFlowctrl(TestCase): ) elif option == 3: return TestLinkFlowctrl.pause_frame % ( - self.tester_tx_mac, + self.tg_tx_mac, "01:80:C2:00:AB:10", TestLinkFlowctrl.pause_frame_type, TestLinkFlowctrl.pause_frame_opcode, @@ -289,7 +289,7 @@ class TestLinkFlowctrl(TestCase): ) return TestLinkFlowctrl.pause_frame % ( - self.tester_tx_mac, + self.tg_tx_mac, TestLinkFlowctrl.pause_frame_dst, TestLinkFlowctrl.pause_frame_type, TestLinkFlowctrl.pause_frame_opcode, @@ -410,10 +410,10 @@ class TestLinkFlowctrl(TestCase): self.check_pause_frame_test_result(port_stats, True, True) # test again after port stop/start - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ", 60) - self.dut.send_expect("start", "testpmd> ", 60) - self.dut.send_expect("clear port stats all", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("clear port stats all", "testpmd> ") port_stats = self.send_packets(pause_frame) self.check_pause_frame_test_result(port_stats, True, True) @@ -422,10 +422,10 @@ class TestLinkFlowctrl(TestCase): self.check_pause_frame_test_result(port_stats) # test again after port stop/start - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ", 60) - self.dut.send_expect("start", "testpmd> ", 60) - self.dut.send_expect("clear port stats all", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) + self.sut_node.send_expect("clear port stats all", "testpmd> ") port_stats = self.send_packets(pause_frame) self.check_pause_frame_test_result(port_stats) @@ -552,11 +552,11 @@ 
class TestLinkFlowctrl(TestCase): ) # test again after port stop/start - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ", 60) - self.dut.send_expect("start", "testpmd> ", 60) - self.pmdout.wait_link_status_up(self.dutPorts[0]) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) + self.pmdout.wait_link_status_up(self.sutPorts[0]) tgenInput = self.get_tgen_input() result = self.start_traffic(tgenInput) self.logger.info("Packet loss: %.3f" % result) @@ -564,7 +564,7 @@ class TestLinkFlowctrl(TestCase): result <= 0.01, "Link flow control fail after port stop/start, the loss percent is more than 1%", ) - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") # Disable link flow control and PAUSE frame forwarding self.set_flow_ctrl( @@ -583,11 +583,11 @@ class TestLinkFlowctrl(TestCase): "Link flow control fail, the loss percent is less than 50%", ) # test again after port Stop/start - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ", 60) - self.dut.send_expect("start", "testpmd> ", 60) - self.pmdout.wait_link_status_up(self.dutPorts[0]) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ", 60) + self.sut_node.send_expect("start", "testpmd> ", 60) + self.pmdout.wait_link_status_up(self.sutPorts[0]) result = self.start_traffic(tgenInput) self.logger.info("Packet loss: %.3f" % result) if self.nic == "IXGBE_10G-82599_SFP": @@ -600,14 +600,14 @@ class TestLinkFlowctrl(TestCase): result >= 0.5, "Link flow control fail, the loss percent is less than 50%", ) - 
self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") def tear_down(self): """ Run after each test case. """ - self.dut.send_expect("quit", "# ") - self.dut.kill_all() + self.sut_node.send_expect("quit", "# ") + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_link_status_interrupt.py b/tests/TestSuite_link_status_interrupt.py index 971eac1d..13ad5930 100644 --- a/tests/TestSuite_link_status_interrupt.py +++ b/tests/TestSuite_link_status_interrupt.py @@ -12,7 +12,7 @@ import string import time import framework.utils as utils -from framework.packet import Packet +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -21,18 +21,18 @@ class TestLinkStatusInterrupt(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - cores = self.dut.get_core_list("1S/4C/1T") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + cores = self.sut_node.get_core_list("1S/4C/1T") self.coremask = utils.create_mask(cores) - self.portmask = utils.create_mask(self.dut_ports) - self.eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") - self.app_link_status_interrupt_path = self.dut.apps_name[ + self.portmask = utils.create_mask(self.sut_ports) + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/4C/1T") + self.app_link_status_interrupt_path = self.sut_node.apps_name[ "link_status_interrupt" ] # build sample app - out = self.dut.build_dpdk_apps("./examples/link_status_interrupt") + out = self.sut_node.build_dpdk_apps("./examples/link_status_interrupt") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") # from kernel 4.8+, kernel will not support legacy intr mode. 
@@ -52,36 +52,36 @@ class TestLinkStatusInterrupt(TestCase): if self.drivername == "vfio-pci": self.basic_intr_mode.append("msi") self.intfs = [ - self.tester.get_interface(self.tester.get_local_port(i)) - for i in self.dut_ports + self.tg_node.get_interface(self.tg_node.get_local_port(i)) + for i in self.sut_ports ] # check link-down-on-close flag self.flag = "link-down-on-close" for intf in self.intfs: set_flag = "ethtool --set-priv-flags %s %s on" % (intf, self.flag) - self.flag_default_stats = self.tester.get_priv_flags_state(intf, self.flag) + self.flag_default_stats = self.tg_node.get_priv_flags_state(intf, self.flag) if self.flag_default_stats == "off": - self.tester.send_expect(set_flag, "# ") + self.tg_node.send_expect(set_flag, "# ") time.sleep(0.5) self.verify( - self.tester.get_priv_flags_state(intf, self.flag) == "on", + self.tg_node.get_priv_flags_state(intf, self.flag) == "on", "set %s %s on failed" % (intf, self.flag), ) - def set_link_status_and_verify(self, dutPort, status): + def set_link_status_and_verify(self, sutPort, status): """ set link status verify results """ - self.intf = self.tester.get_interface(self.tester.get_local_port(dutPort)) - if self.dut.get_os_type() != "freebsd" and self.flag_default_stats: - self.tester.send_expect( + self.intf = self.tg_node.get_interface(self.tg_node.get_local_port(sutPort)) + if self.sut_node.get_os_type() != "freebsd" and self.flag_default_stats: + self.tg_node.send_expect( "ethtool --set-priv-flags %s link-down-on-close on" % self.intf, "#", 10 ) - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s %s" % (self.intf, status.lower()), "# ", 10 ) - verify_point = "Port %s Link %s" % (dutPort, status.lower()) - out = self.dut.get_session_output(timeout=60) + verify_point = "Port %s Link %s" % (sutPort, status.lower()) + out = self.sut_node.get_session_output(timeout=60) self.verify(verify_point in out, "link status update error") def set_up(self): @@ -101,19 +101,19 @@ class 
TestLinkStatusInterrupt(TestCase): self.portmask, ) for mode in self.basic_intr_mode: - self.dut.send_expect("rmmod -f igb_uio", "#", 20) - self.dut.send_expect( + self.sut_node.send_expect("rmmod -f igb_uio", "#", 20) + self.sut_node.send_expect( 'insmod %s/kmod/igb_uio.ko "intr_mode=%s"' % (self.target, mode), "# ", ) - self.dut.bind_interfaces_linux() - self.dut.send_command(cmdline, 180) - out = self.dut.get_session_output(timeout=60) + self.sut_node.bind_interfaces_linux() + self.sut_node.send_command(cmdline, 180) + out = self.sut_node.get_session_output(timeout=60) self.verify("Port statistics" in out, "setup example error") time.sleep(10) - self.set_link_status_and_verify(self.dut_ports[0], "Down") - self.set_link_status_and_verify(self.dut_ports[0], "Up") - self.dut.send_expect("^C", "#", 60) + self.set_link_status_and_verify(self.sut_ports[0], "Down") + self.set_link_status_and_verify(self.sut_ports[0], "Up") + self.sut_node.send_expect("^C", "#", 60) elif self.drivername == "vfio-pci": for mode in self.basic_intr_mode: cmdline = ( @@ -121,10 +121,10 @@ class TestLinkStatusInterrupt(TestCase): + " %s --vfio-intr=%s -- -p %s" % (self.eal_para, mode, self.portmask) ) - self.dut.send_expect(cmdline, "statistics", 120) - self.set_link_status_and_verify(self.dut_ports[0], "Down") - self.set_link_status_and_verify(self.dut_ports[0], "Up") - self.dut.send_expect("^C", "#", 60) + self.sut_node.send_expect(cmdline, "statistics", 120) + self.set_link_status_and_verify(self.sut_ports[0], "Down") + self.set_link_status_and_verify(self.sut_ports[0], "Up") + self.sut_node.send_expect("^C", "#", 60) def test_link_status_interrupt_port_available(self): """ @@ -137,27 +137,27 @@ class TestLinkStatusInterrupt(TestCase): self.portmask, ) for mode in self.basic_intr_mode: - self.dut.send_expect("rmmod -f igb_uio", "#", 20) - self.dut.send_expect( + self.sut_node.send_expect("rmmod -f igb_uio", "#", 20) + self.sut_node.send_expect( 'insmod %s/kmod/igb_uio.ko "intr_mode=%s"' % 
(self.target, mode), "# ", ) - self.dut.bind_interfaces_linux() - self.dut.send_expect(cmdline, "Aggregate statistics", 60) - for port in self.dut_ports: - self.set_link_status_and_verify(self.dut_ports[port], "Down") + self.sut_node.bind_interfaces_linux() + self.sut_node.send_expect(cmdline, "Aggregate statistics", 60) + for port in self.sut_ports: + self.set_link_status_and_verify(self.sut_ports[port], "Down") time.sleep(10) - for port in self.dut_ports: - self.set_link_status_and_verify(self.dut_ports[port], "Up") + for port in self.sut_ports: + self.set_link_status_and_verify(self.sut_ports[port], "Up") self.scapy_send_packet(1) - out = self.dut.get_session_output(timeout=60) + out = self.sut_node.get_session_output(timeout=60) pkt_send = re.findall("Total packets sent:\s*(\d*)", out) pkt_received = re.findall("Total packets received:\s*(\d*)", out) self.verify( pkt_send[-1] == pkt_received[-1] == "1", "Error: sent packets != received packets", ) - self.dut.send_expect("^C", "#", 60) + self.sut_node.send_expect("^C", "#", 60) elif self.drivername == "vfio-pci": for mode in self.basic_intr_mode: cmdline = ( @@ -165,50 +165,50 @@ class TestLinkStatusInterrupt(TestCase): + " %s --vfio-intr=%s -- -p %s" % (self.eal_para, mode, self.portmask) ) - self.dut.send_expect(cmdline, "Aggregate statistics", 60) - for port in self.dut_ports: - self.set_link_status_and_verify(self.dut_ports[port], "Down") + self.sut_node.send_expect(cmdline, "Aggregate statistics", 60) + for port in self.sut_ports: + self.set_link_status_and_verify(self.sut_ports[port], "Down") time.sleep(10) - for port in self.dut_ports: - self.set_link_status_and_verify(self.dut_ports[port], "Up") + for port in self.sut_ports: + self.set_link_status_and_verify(self.sut_ports[port], "Up") self.scapy_send_packet(1) - out = self.dut.get_session_output(timeout=60) + out = self.sut_node.get_session_output(timeout=60) pkt_send = re.findall("Total packets sent:\s*(\d*)", out) pkt_received = re.findall("Total 
packets received:\s*(\d*)", out) self.verify( pkt_send[-1] == pkt_received[-1] == "1", "Error: sent packets != received packets", ) - self.dut.send_expect("^C", "#", 60) + self.sut_node.send_expect("^C", "#", 60) def scapy_send_packet(self, num=1): """ Send a packet to port """ - self.dmac = self.dut.get_mac_address(self.dut_ports[0]) - txport = self.tester.get_local_port(self.dut_ports[0]) - self.txItf = self.tester.get_interface(txport) - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": self.dmac}) - pkt.send_pkt(self.tester, tx_port=self.txItf, count=num) + self.dmac = self.sut_node.get_mac_address(self.sut_ports[0]) + txport = self.tg_node.get_local_port(self.sut_ports[0]) + self.txItf = self.tg_node.get_interface(txport) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": self.dmac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.txItf, count=num) def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) - for port in self.dut_ports: - intf = self.tester.get_interface(self.tester.get_local_port(port)) - self.tester.send_expect("ifconfig %s up" % intf, "# ", 10) + for port in self.sut_ports: + intf = self.tg_node.get_interface(self.tg_node.get_local_port(port)) + self.tg_node.send_expect("ifconfig %s up" % intf, "# ", 10) def tear_down_all(self): """ Run after each test suite. 
""" - if self.dut.get_os_type() != "freebsd" and self.flag_default_stats: + if self.sut_node.get_os_type() != "freebsd" and self.flag_default_stats: for intf in self.intfs: - self.tester.send_expect( + self.tg_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (intf, self.flag, self.flag_default_stats), "# ", diff --git a/tests/TestSuite_linux_modules.py b/tests/TestSuite_linux_modules.py index e0c8692f..0de5dbe8 100644 --- a/tests/TestSuite_linux_modules.py +++ b/tests/TestSuite_linux_modules.py @@ -30,23 +30,23 @@ class LinuxModulesHelperMethods: """ Prerequisite steps for each test suit. """ - self.dut_ports = self.dut.get_ports() - self.pmdout = PmdOutput(self.dut) - pci_address = self.dut.ports_info[self.dut_ports[0]]["pci"] + self.sut_ports = self.sut_node.get_ports() + self.pmdout = PmdOutput(self.sut_node) + pci_address = self.sut_node.ports_info[self.sut_ports[0]]["pci"] self.old_driver = settings.get_nic_driver(pci_address) - out = self.dut.bind_interfaces_linux(driver=self.driver) + out = self.sut_node.bind_interfaces_linux(driver=self.driver) self.verify("bind failed" not in out, f"Failed to bind {self.driver}") self.verify("not loaded" not in out, f"{self.driver} was not loaded") def send_scapy_packet(self, port_id: int, packet: str): - itf = self.tester.get_interface(port_id) + itf = self.tg_node.get_interface(port_id) - self.tester.scapy_foreground() - self.tester.scapy_append(f'sendp({packet}, iface="{itf}")') - return self.tester.scapy_execute() + self.tg_node.scapy_foreground() + self.tg_node.scapy_append(f'sendp({packet}, iface="{itf}")') + return self.tg_node.scapy_execute() def tear_down(self): - self.dut.kill_all() + self.sut_node.kill_all() def run_example_program_in_userspace(self, directory: str, program: str): """ @@ -54,25 +54,25 @@ class LinuxModulesHelperMethods: @param directory: The directory under examples where the app is @param program: the name of the binary to run """ - out: str = 
self.dut.build_dpdk_apps(f"$RTE_SDK/examples/{directory}") + out: str = self.sut_node.build_dpdk_apps(f"$RTE_SDK/examples/{directory}") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") program_build_location = f"$RTE_SDK/examples/{directory}/build/{program}" - program_user_location = f"/tmp/dut/bin/{program}" + program_user_location = f"/tmp/sut/bin/{program}" - self.dut.send_expect(f"chmod +x {program_build_location}", "# ") - self.dut.send_expect("mkdir -p /tmp/dut/bin/", "# ") - user_home_dir = self.dut.send_expect( + self.sut_node.send_expect(f"chmod +x {program_build_location}", "# ") + self.sut_node.send_expect("mkdir -p /tmp/sut/bin/", "# ") + user_home_dir = self.sut_node.send_expect( f"cp {program_build_location} {program_user_location}", "# " ) - self.dut.alt_session.send_expect(f"su {settings.UNPRIVILEGED_USERNAME}", "# ") - self.dut.alt_session.send_expect( + self.sut_node.alt_session.send_expect(f"su {settings.UNPRIVILEGED_USERNAME}", "# ") + self.sut_node.alt_session.send_expect( f"{program_user_location} --in-memory {self.additional_eal_options}", "# " ) - out: str = self.dut.alt_session.send_expect("echo $?", "# ") - self.dut.alt_session.send_expect("exit", "# ") # Return to root session + out: str = self.sut_node.alt_session.send_expect("echo $?", "# ") + self.sut_node.alt_session.send_expect("exit", "# ") # Return to root session self.verify(out.strip() == "0", f"{program} exited in an error state") def tx_rx_test_helper(self, pmdout, param="", eal_param=""): @@ -82,30 +82,30 @@ class LinuxModulesHelperMethods: eal_param=f"{eal_param} {self.additional_eal_options}", ) pmdout.execute_cmd("start") - dut_mac = self.dut.get_mac_address(self.dut_ports[0]) - tester_mac = self.tester.get_mac(self.tester.get_local_port(self.dut_ports[0])) - iface = self.tester.get_interface(self.dut_ports[0]) - pcap_path: str = f"/tmp/tester/test-{self.driver}.pcap" - self.tester.send_expect( - f"tcpdump -i 
{iface} -w /tmp/tester/test-{self.driver}.pcap ether src {tester_mac} &", + sut_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + tg_mac = self.tg_node.get_mac(self.tg_node.get_local_port(self.sut_ports[0])) + iface = self.tg_node.get_interface(self.sut_ports[0]) + pcap_path: str = f"/tmp/tg/test-{self.driver}.pcap" + self.tg_node.send_expect( + f"tcpdump -i {iface} -w /tmp/tg/test-{self.driver}.pcap ether src {tg_mac} &", "# ", ) - self.tester.send_expect(f"TCPDUMP_PID=$!", "# ") + self.tg_node.send_expect(f"TCPDUMP_PID=$!", "# ") self.send_scapy_packet( - self.dut_ports[0], - f"[Ether(dst='{dut_mac}', src='{tester_mac}')/IP()/TCP()/('a') for i in range(20)]", + self.sut_ports[0], + f"[Ether(dst='{sut_mac}', src='{tg_mac}')/IP()/TCP()/('a') for i in range(20)]", ) time.sleep(0.1) - self.tester.send_expect("kill -SIGINT $TCPDUMP_PID", "# ") + self.tg_node.send_expect("kill -SIGINT $TCPDUMP_PID", "# ") os.system(f"mkdir -p {settings.FOLDERS['Output']}/tmp/pcap/") - self.tester.session.copy_file_from( + self.tg_node.session.copy_file_from( pcap_path, dst=os.path.join(settings.FOLDERS["Output"], "tmp/pcap/") ) - out: str = self.tester.send_expect( - f"tcpdump -r /tmp/tester/test-{self.driver}.pcap", "# " + out: str = self.tg_node.send_expect( + f"tcpdump -r /tmp/tg/test-{self.driver}.pcap", "# " ) self.verify( - len(out.splitlines()) >= 20, "Not all packets were received by the tester." + len(out.splitlines()) >= 20, "Not all packets were received by the TG." ) pmdout.quit() @@ -120,8 +120,8 @@ class LinuxModulesHelperMethods: When the case of this test suite finished, the environment should clear up. 
""" - self.dut.bind_interfaces_linux(driver=self.old_driver) - self.dut.kill_all() + self.sut_node.bind_interfaces_linux(driver=self.old_driver) + self.sut_node.kill_all() def test_tx_rx(self): """ @@ -134,19 +134,19 @@ class LinuxModulesHelperMethods: self.run_example_program_in_userspace("helloworld", "helloworld-shared") def test_tx_rx_userspace(self): - app_path = self.dut.apps_name["test-pmd"] - self.dut.send_expect(f"chmod +rx {app_path}", "#") - path = self.dut.send_expect("pwd", "#") - self.dut.alt_session.send_expect(f"su {settings.UNPRIVILEGED_USERNAME}", "#") - self.dut.alt_session.send_expect(f"cd {path}", "#") - self.dut.send_expect( + app_path = self.sut_node.apps_name["test-pmd"] + self.sut_node.send_expect(f"chmod +rx {app_path}", "#") + path = self.sut_node.send_expect("pwd", "#") + self.sut_node.alt_session.send_expect(f"su {settings.UNPRIVILEGED_USERNAME}", "#") + self.sut_node.alt_session.send_expect(f"cd {path}", "#") + self.sut_node.send_expect( f"setfacl -m u:{settings.UNPRIVILEGED_USERNAME}:rwx {self.dev_interface}", "#", ) self.tx_rx_test_helper( - PmdOutput(self.dut, session=self.dut.alt_session), eal_param="--in-memory" + PmdOutput(self.sut_node, session=self.sut_node.alt_session), eal_param="--in-memory" ) - self.dut.alt_session.send_expect(f"exit", "#") + self.sut_node.alt_session.send_expect(f"exit", "#") class TestVfio(LinuxModulesHelperMethods, TestCase): diff --git a/tests/TestSuite_loopback_multi_paths_port_restart.py b/tests/TestSuite_loopback_multi_paths_port_restart.py index c14070ce..7242f8d4 100644 --- a/tests/TestSuite_loopback_multi_paths_port_restart.py +++ b/tests/TestSuite_loopback_multi_paths_port_restart.py @@ -23,14 +23,14 @@ class TestLoopbackPortRestart(TestCase): """ self.frame_sizes = [64, 1518] self.core_config = "1S/5C/1T" - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.sut_ports = self.sut_node.get_ports() + 
self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.core_list_user = self.core_list[0:2] self.core_list_host = self.core_list[2:5] - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] def set_up(self): @@ -38,24 +38,24 @@ class TestLoopbackPortRestart(TestCase): Run before each test case. """ # Clean the execution ENV - self.dut.send_expect("rm -rf ./vhost.out", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf ./vhost.out", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") # Prepare the result table self.table_header = ["FrameSize(B)", "Mode", "Throughput(Mpps)", "Cycle"] self.result_table_create(self.table_header) - self.vhost = self.dut.new_session(suite="vhost") - self.virtio_user = self.dut.new_session(suite="virtio-user") - self.vhost_pmd = PmdOutput(self.dut, self.vhost) - self.virtio_user_pmd = PmdOutput(self.dut, self.virtio_user) + self.vhost = self.sut_node.new_session(suite="vhost") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") + self.vhost_pmd = PmdOutput(self.sut_node, self.vhost) + self.virtio_user_pmd = PmdOutput(self.sut_node, self.virtio_user) def start_vhost_testpmd(self): """ start testpmd on vhost """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") eal_params = "--vdev 'net_vhost0,iface=vhost-net,queues=1,client=0'" param = "--nb-cores=1 --txd=1024 --rxd=1024" self.vhost_pmd.start_testpmd( @@ -71,7 +71,7 @@ 
class TestLoopbackPortRestart(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -199,8 +199,8 @@ class TestLoopbackPortRestart(TestCase): """ close session of vhost-user and virtio-user """ - self.dut.close_session(self.vhost) - self.dut.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost) + self.sut_node.close_session(self.virtio_user) def test_loopback_test_with_packed_ring_mergeable_path(self): """ @@ -353,7 +353,7 @@ class TestLoopbackPortRestart(TestCase): """ Run after each test case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.close_all_session() time.sleep(2) diff --git a/tests/TestSuite_loopback_multi_queues.py b/tests/TestSuite_loopback_multi_queues.py index c6f3b5b5..2d3ae075 100644 --- a/tests/TestSuite_loopback_multi_queues.py +++ b/tests/TestSuite_loopback_multi_queues.py @@ -24,9 +24,9 @@ class TestLoopbackMultiQueues(TestCase): """ self.frame_sizes = [64, 1518] self.verify_queue = [1, 8] - self.dut_ports = self.dut.get_ports() - port_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list(config="all", socket=port_socket) + self.sut_ports = self.sut_node.get_ports() + port_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list(config="all", socket=port_socket) self.cores_num = len(self.core_list) self.logger.info( "you can config packet_size in file %s.cfg," % self.suite_name @@ -35,7 +35,7 @@ class TestLoopbackMultiQueues(TestCase): # get the frame_sizes from cfg file if "packet_sizes" in self.get_suite_cfg(): self.frame_sizes = self.get_suite_cfg()["packet_sizes"] - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = 
self.path.split("/")[-1] def set_up(self): @@ -43,16 +43,16 @@ class TestLoopbackMultiQueues(TestCase): Run before each test case. """ # Prepare the result table - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.table_header = ["Frame", "Mode", "Throughput(Mpps)", "Queue Number"] self.result_table_create(self.table_header) self.data_verify = {} - self.vhost = self.dut.new_session(suite="vhost") - self.virtio_user = self.dut.new_session(suite="virtio-user") - self.vhost_pmd = PmdOutput(self.dut, self.vhost) - self.virtio_user_pmd = PmdOutput(self.dut, self.virtio_user) + self.vhost = self.sut_node.new_session(suite="vhost") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") + self.vhost_pmd = PmdOutput(self.sut_node, self.vhost) + self.virtio_user_pmd = PmdOutput(self.sut_node, self.virtio_user) def get_core_mask(self): """ @@ -88,7 +88,7 @@ class TestLoopbackMultiQueues(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -204,8 +204,8 @@ class TestLoopbackMultiQueues(TestCase): """ close all session of vhost and vhost-user """ - self.dut.close_session(self.virtio_user) - self.dut.close_session(self.vhost) + self.sut_node.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost) def test_loopback_multi_queue_virtio11_mergeable(self): """ @@ -394,7 +394,7 @@ class TestLoopbackMultiQueues(TestCase): """ Run after each test case. 
""" - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.close_all_session() def tear_down_all(self): diff --git a/tests/TestSuite_loopback_virtio_user_server_mode.py b/tests/TestSuite_loopback_virtio_user_server_mode.py index 35ddd79f..363e8542 100644 --- a/tests/TestSuite_loopback_virtio_user_server_mode.py +++ b/tests/TestSuite_loopback_virtio_user_server_mode.py @@ -11,8 +11,8 @@ import re import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -24,22 +24,22 @@ class TestLoopbackVirtioUserServerMode(TestCase): self.core_config = "1S/6C/1T" self.queue_number = 1 self.nb_cores = 1 - self.cores_num = len([n for n in self.dut.cores if int(n["socket"]) == 0]) + self.cores_num = len([n for n in self.sut_node.cores if int(n["socket"]) == 0]) self.verify( self.cores_num >= 6, "There has not enought cores to test this case" ) - self.dut_ports = self.dut.get_ports() - self.unbind_ports = copy.deepcopy(self.dut_ports) - self.dut.unbind_interfaces_linux(self.unbind_ports) - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.sut_ports = self.sut_node.get_ports() + self.unbind_ports = copy.deepcopy(self.sut_ports) + self.sut_node.unbind_interfaces_linux(self.unbind_ports) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.core_list_user = self.core_list[0:3] self.core_list_host = self.core_list[3:6] - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] - self.app_pdump = self.dut.apps_name["pdump"] + self.app_pdump = self.sut_node.apps_name["pdump"] 
self.dump_pcap = "/root/pdump-rx.pcap" self.device_str = "" self.cbdma_dev_infos = [] @@ -49,8 +49,8 @@ class TestLoopbackVirtioUserServerMode(TestCase): Run before each test case. """ # Clean the execution ENV - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") # Prepare the result table self.table_header = [ "Mode", @@ -61,10 +61,10 @@ class TestLoopbackVirtioUserServerMode(TestCase): ] self.result_table_create(self.table_header) - self.vhost = self.dut.new_session(suite="vhost") - self.virtio_user = self.dut.new_session(suite="virtio-user") - self.vhost_pmd = PmdOutput(self.dut, self.vhost) - self.virtio_user_pmd = PmdOutput(self.dut, self.virtio_user) + self.vhost = self.sut_node.new_session(suite="vhost") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") + self.vhost_pmd = PmdOutput(self.sut_node, self.vhost) + self.virtio_user_pmd = PmdOutput(self.sut_node, self.virtio_user) def lanuch_vhost_testpmd(self, queue_number=1, nb_cores=1, extern_params=""): """ @@ -89,7 +89,7 @@ class TestLoopbackVirtioUserServerMode(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -283,9 +283,9 @@ class TestLoopbackVirtioUserServerMode(TestCase): def launch_pdump_to_capture_pkt(self, dump_port): """ - bootup pdump in dut + bootup pdump in SUT """ - self.pdump_session = self.dut.new_session(suite="pdump") + self.pdump_session = self.sut_node.new_session(suite="pdump") cmd = ( self.app_pdump + " " @@ -300,11 +300,11 @@ class TestLoopbackVirtioUserServerMode(TestCase): """ self.pdump_session.send_expect("^c", "# ", 60) time.sleep(3) - self.dut.session.copy_file_from( + self.sut_node.session.copy_file_from( 
src="%s" % self.dump_pcap, dst="%s" % self.dump_pcap ) - pkt = Packet() - pkts = pkt.read_pcapfile(self.dump_pcap) + scapy_pkt_builder = ScapyPacketBuilder() + pkts = scapy_pkt_builder.read_pcapfile(self.dump_pcap) expect_data = str(pkts[0]["Raw"]) for i in range(len(pkts)): @@ -317,7 +317,7 @@ class TestLoopbackVirtioUserServerMode(TestCase): check_data == expect_data, "the payload in receive packets has been changed from %s" % i, ) - self.dut.send_expect("rm -rf %s" % self.dump_pcap, "#") + self.sut_node.send_expect("rm -rf %s" % self.dump_pcap, "#") def relanuch_vhost_testpmd_send_packets( self, extern_params, cbdma=False, iova="va" @@ -468,8 +468,8 @@ class TestLoopbackVirtioUserServerMode(TestCase): """ close session of vhost-user and virtio-user """ - self.dut.close_session(self.vhost) - self.dut.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost) + self.sut_node.close_session(self.virtio_user) def test_server_mode_launch_virtio_first(self): """ @@ -1271,7 +1271,7 @@ class TestLoopbackVirtioUserServerMode(TestCase): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -1292,7 +1292,7 @@ class TestLoopbackVirtioUserServerMode(TestCase): "There no enough cbdma device to run this suite", ) self.device_str = " ".join(self.cbdma_dev_infos[0 : self.cbdma_nic_dev_num]) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -1301,11 +1301,11 @@ class TestLoopbackVirtioUserServerMode(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + 
self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -1316,7 +1316,7 @@ class TestLoopbackVirtioUserServerMode(TestCase): """ Run after each test case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.close_all_session() time.sleep(2) diff --git a/tests/TestSuite_loopback_virtio_user_server_mode_cbdma.py b/tests/TestSuite_loopback_virtio_user_server_mode_cbdma.py index 5b49eaf9..aeaa2e42 100644 --- a/tests/TestSuite_loopback_virtio_user_server_mode_cbdma.py +++ b/tests/TestSuite_loopback_virtio_user_server_mode_cbdma.py @@ -9,8 +9,8 @@ Test loopback virtio-user server mode import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -19,30 +19,30 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list(config="all", socket=self.ports_socket) + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list(config="all", socket=self.ports_socket) self.vhost_core_list = self.core_list[0:9] self.virtio0_core_list = self.core_list[10:12] - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] - self.app_pdump = self.dut.apps_name["pdump"] + self.app_pdump = self.sut_node.apps_name["pdump"] self.dump_pcap_q0 = "/root/pdump-rx-q0.pcap" self.dump_pcap_q1 = "/root/pdump-rx-q1.pcap" self.device_str = None self.cbdma_dev_infos = [] - self.vhost_user = self.dut.new_session(suite="vhost_user") - self.virtio_user = self.dut.new_session(suite="virtio-user") - self.pdump_session = self.dut.new_session(suite="pdump") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user_pmd = PmdOutput(self.dut, self.virtio_user) + self.vhost_user = self.sut_node.new_session(suite="vhost_user") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") + self.pdump_session = self.sut_node.new_session(suite="pdump") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user_pmd = PmdOutput(self.sut_node, self.virtio_user) def set_up(self): """ Run before each test case. 
""" - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.table_header = [ "Mode", "Pkt_size", @@ -54,7 +54,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -108,9 +108,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase): self.pdump_session.send_expect("^c", "# ", 60) dump_file_list = [self.dump_pcap_q0, self.dump_pcap_q1] for pcap in dump_file_list: - self.dut.session.copy_file_from(src="%s" % pcap, dst="%s" % pcap) - pkt = Packet() - pkts = pkt.read_pcapfile(pcap) + self.sut_node.session.copy_file_from(src="%s" % pcap, dst="%s" % pcap) + scapy_pkt_builder = ScapyPacketBuilder() + pkts = scapy_pkt_builder.read_pcapfile(pcap) expect_data = str(pkts[0]["Raw"]) for i in range(len(pkts)): self.verify( @@ -166,7 +166,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -190,7 +190,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -198,11 +198,11 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase): ) def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + 
self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -212,8 +212,8 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase): """ close session of vhost-user and virtio-user """ - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user) def test_server_mode_packed_ring_all_path_multi_queues_payload_check_with_cbdma( self, @@ -944,7 +944,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase): """ self.virtio_user_pmd.quit() self.vhost_user_pmd.quit() - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.bind_cbdma_device_to_kernel() def tear_down_all(self): diff --git a/tests/TestSuite_mac_filter.py b/tests/TestSuite_mac_filter.py index a0ba65c7..a808abaf 100644 --- a/tests/TestSuite_mac_filter.py +++ b/tests/TestSuite_mac_filter.py @@ -11,8 +11,8 @@ import operator import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -26,26 +26,26 @@ class TestMacFilter(TestCase): """ self.frames_to_send = 4 # Based on h/w type, choose how many ports to use - self.dutPorts = self.dut.get_ports() + self.sutPorts = self.sut_node.get_ports() # Verify that enough ports are available - self.verify(len(self.dutPorts) >= 1, "Insufficient ports") + self.verify(len(self.sutPorts) >= 1, "Insufficient ports") def set_up(self): """ Run before each test case. Nothing to do. 
""" - portMask = utils.create_mask(self.dutPorts[:1]) - self.pmdout = PmdOutput(self.dut) + portMask = utils.create_mask(self.sutPorts[:1]) + self.pmdout = PmdOutput(self.sut_node) self.pmdout.start_testpmd("Default", "--portmask=%s" % portMask) - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # get dest address from self.target port - out = self.dut.send_expect("show port info %d" % self.dutPorts[0], "testpmd> ") + out = self.sut_node.send_expect("show port info %d" % self.sutPorts[0], "testpmd> ") - self.dest = self.dut.get_mac_address(self.dutPorts[0]) + self.dest = self.sut_node.get_mac_address(self.sutPorts[0]) mac_scanner = r"MAC address: (([\dA-F]{2}:){5}[\dA-F]{2})" ret = utils.regexp(out, mac_scanner) @@ -64,10 +64,10 @@ class TestMacFilter(TestCase): if count == -1: count = self.frames_to_send - itf = self.tester.get_interface(self.tester.get_local_port(portid)) - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"src": "52:00:00:00:00:00", "dst": destMac}) - pkt.send_pkt(self.tester, tx_port=itf, count=count) + itf = self.tg_node.get_interface(self.tg_node.get_local_port(portid)) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"src": "52:00:00:00:00:00", "dst": destMac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=itf, count=count) def test_add_remove_mac_address(self): """ @@ -76,16 +76,16 @@ class TestMacFilter(TestCase): """ # initialise first port without promiscuous mode fake_mac_addr = "00:01:01:00:00:00" - portid = self.dutPorts[0] - self.dut.send_expect("set promisc %d off" % portid, "testpmd> ") - self.dut.send_expect("clear port stats all", "testpmd> ") + portid = self.sutPorts[0] + self.sut_node.send_expect("set 
promisc %d off" % portid, "testpmd> ") + self.sut_node.send_expect("clear port stats all", "testpmd> ") # send one packet with the portid MAC address self.allowlist_send_packet(portid, self.dest) # 82599 and Intel® Ethernet 700 Series have different packet statistics when using the # "show port stats" command. Packets number is stripped from log. - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() cur_rxpkt = utils.regexp(out, "received ([0-9]+) packets") # check the packet increase self.verify( @@ -94,9 +94,9 @@ class TestMacFilter(TestCase): ) # send one packet to a different MAC address - # new_mac = self.dut.get_mac_address(portid) + # new_mac = self.sut_node.get_mac_address(portid) self.allowlist_send_packet(portid, fake_mac_addr) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() # check the packet DO NOT increase self.verify( "received" not in out, @@ -104,12 +104,12 @@ class TestMacFilter(TestCase): ) # add the different MAC address - self.dut.send_expect( + self.sut_node.send_expect( "mac_addr add %d" % portid + " %s" % fake_mac_addr, "testpmd>" ) # send again one packet to a different MAC address self.allowlist_send_packet(portid, fake_mac_addr) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() cur_rxpkt = utils.regexp(out, "received ([0-9]+) packets") # check the packet increase self.verify( @@ -118,19 +118,19 @@ class TestMacFilter(TestCase): ) # remove the fake MAC address - self.dut.send_expect( + self.sut_node.send_expect( "mac_addr remove %d" % portid + " %s" % fake_mac_addr, "testpmd>" ) # send again one packet to a different MAC address self.allowlist_send_packet(portid, fake_mac_addr) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() # check the packet increase self.verify( "received" not in out, "Packet has been received on a new MAC address that has been removed from the port", ) - self.dut.send_expect("stop", "testpmd> ") - 
self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def test_invalid_addresses(self): """ @@ -140,27 +140,27 @@ class TestMacFilter(TestCase): Add Same MAC twice will be failed Add more than MAX number will be failed """ - portid = self.dutPorts[0] + portid = self.sutPorts[0] fake_mac_addr = "00:00:00:00:00:00" # add an address with all zeroes to the port (-EINVAL) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "mac_addr add %d" % portid + " %s" % fake_mac_addr, "testpmd>" ) self.verify("Invalid argument" in out, "Added a NULL MAC address") # remove the default MAC address (-EADDRINUSE) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "mac_addr remove %d" % portid + " %s" % self.dest, "testpmd>" ) self.verify("Address already in use" in out, "default address removed") # add same address 2 times fake_mac_addr = "00:00:00:00:00:01" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "mac_addr add %d" % portid + " %s" % fake_mac_addr, "testpmd>" ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "mac_addr add %d" % portid + " %s" % fake_mac_addr, "testpmd>" ) self.verify("error" not in out, "added 2 times the same address with an error") @@ -170,7 +170,7 @@ class TestMacFilter(TestCase): base_addr = "00:01:00:00:00:" while i < int(self.max_mac_addr): new_addr = base_addr + "%0.2X" % i - out = self.dut.send_expect( + out = self.sut_node.send_expect( "mac_addr add %d" % portid + " %s" % new_addr, "testpmd>" ) i = i + 1 @@ -179,8 +179,8 @@ class TestMacFilter(TestCase): "No space left on device" in out, "added 1 address more than max MAC addresses", ) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def test_multicast_filter(self): """ @@ -189,30 +189,30 @@ class TestMacFilter(TestCase): """ # 
initialise first port without promiscuous mode mcast_addr = "01:00:5E:00:00:00" - portid = self.dutPorts[0] - self.dut.send_expect(f"set promisc {portid:d} off", "testpmd> ") - self.dut.send_expect("clear port stats all", "testpmd> ") + portid = self.sutPorts[0] + self.sut_node.send_expect(f"set promisc {portid:d} off", "testpmd> ") + self.sut_node.send_expect("clear port stats all", "testpmd> ") - self.dut.send_expect(f"mcast_addr add {portid:d} {mcast_addr}", "testpmd>") + self.sut_node.send_expect(f"mcast_addr add {portid:d} {mcast_addr}", "testpmd>") self.allowlist_send_packet(portid, mcast_addr, count=1) time.sleep(1) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.verify( "received" in out, "Packet has not been received when it should have on a broadcast address", ) - self.dut.send_expect(f"mcast_addr remove {portid:d} {mcast_addr}", "testpmd>") + self.sut_node.send_expect(f"mcast_addr remove {portid:d} {mcast_addr}", "testpmd>") self.allowlist_send_packet(portid, mcast_addr, count=1) time.sleep(1) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.verify( "received" not in out, "Packet has been received when it should have ignored the broadcast", ) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def tear_down(self): """ @@ -225,4 +225,4 @@ class TestMacFilter(TestCase): """ Run after each test suite. 
""" - self.dut.send_expect("quit", "# ", 10) + self.sut_node.send_expect("quit", "# ", 10) diff --git a/tests/TestSuite_macsec_for_ixgbe.py b/tests/TestSuite_macsec_for_ixgbe.py index 643b3dee..ff01afd0 100644 --- a/tests/TestSuite_macsec_for_ixgbe.py +++ b/tests/TestSuite_macsec_for_ixgbe.py @@ -6,10 +6,10 @@ import os import re import time -from framework.dut import Dut -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.sut_node import SutNode from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestMacsecForIxgbe(TestCase): @@ -20,17 +20,17 @@ class TestMacsecForIxgbe(TestCase): self.verify( self.nic in ["IXGBE_10G-82599_SFP"], "NIC Unsupported: " + str(self.nic) ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.core_list = self.dut.get_core_list("1S/4C/1T") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.core_list = self.sut_node.get_core_list("1S/4C/1T") self.verify( len(self.core_list) >= 4, "There has not enought cores to test this suite" ) - self.session_sec = self.dut.new_session() - self.pci_rx = self.dut.ports_info[self.dut_ports[1]]["pci"] - self.pci_tx = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.mac0 = self.dut.get_mac_address(self.dut_ports[0]) - self.mac1 = self.dut.get_mac_address(self.dut_ports[1]) + self.session_sec = self.sut_node.new_session() + self.pci_rx = self.sut_node.ports_info[self.sut_ports[1]]["pci"] + self.pci_tx = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.mac0 = self.sut_node.get_mac_address(self.sut_ports[0]) + self.mac1 = self.sut_node.get_mac_address(self.sut_ports[1]) if self.logger.log_path.startswith(os.sep): self.output_path = self.logger.log_path @@ -38,7 +38,7 @@ class 
TestMacsecForIxgbe(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -47,29 +47,29 @@ class TestMacsecForIxgbe(TestCase): self.ol_flags = 1 def start_testpmd_rx(self): - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list[0:2], ports=[self.pci_rx], prefix="rx" ) - app_name = self.dut.apps_name["test-pmd"] + app_name = self.sut_node.apps_name["test-pmd"] cmd_rx = app_name + eal_params + "-- -i --port-topology=chained" - return self.dut.send_expect(cmd_rx, "testpmd", 120) + return self.sut_node.send_expect(cmd_rx, "testpmd", 120) def start_testpmd_tx(self): - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list[2:4], ports=[self.pci_tx], prefix="tx" ) - app_name = self.dut.apps_name["test-pmd"] + app_name = self.sut_node.apps_name["test-pmd"] cmd_tx = app_name + eal_params + "-- -i --port-topology=chained" return self.session_sec.send_expect(cmd_tx, "testpmd", 120) def start_testpmd_perf(self): - eal_params = self.dut.create_eal_parameters(cores=self.core_list[0:2]) - app_name = self.dut.apps_name["test-pmd"] + eal_params = self.sut_node.create_eal_parameters(cores=self.core_list[0:2]) + app_name = self.sut_node.apps_name["test-pmd"] cmd = app_name + eal_params + "-- -i --port-topology=chained" - self.dut.send_expect(cmd, "testpmd", 120) + self.sut_node.send_expect(cmd, "testpmd", 120) self.rx_set_macsec_offload("on", "on") - self.dut.send_expect("set fwd mac", "testpmd>", 2) - self.dut.send_expect("start", "testpmd>", 2) + self.sut_node.send_expect("set fwd mac", "testpmd>", 2) + self.sut_node.send_expect("start", "testpmd>", 2) def show_xstats(self): time.sleep(0.1) @@ 
-91,10 +91,10 @@ class TestMacsecForIxgbe(TestCase): re.compile("tx_good_packets:\s+(.*?)\s+?").findall(out_out, re.S)[0] ) if self.ol_flags == 0: - pkts_content = self.dut.get_session_output(timeout=2) + pkts_content = self.sut_node.get_session_output(timeout=2) - self.dut.send_expect("stop", "testpmd>", 2) - out_in = self.dut.send_expect("show port xstats 0", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>", 2) + out_in = self.sut_node.send_expect("show port xstats 0", "testpmd>") rx_good_packets = int( re.compile("rx_good_packets:\s+(.*?)\s+?").findall(out_in, re.S)[0] ) @@ -156,18 +156,18 @@ class TestMacsecForIxgbe(TestCase): return result_dict def clear_port_xstats(self): - self.dut.send_expect("clear port xstats 0", "testpmd>") + self.sut_node.send_expect("clear port xstats 0", "testpmd>") self.session_sec.send_expect("clear port xstats 0", "testpmd>") def rx_set_macsec_offload(self, encrypt_rx, replay_rx): # rx port - self.dut.send_expect("port stop 0", "testpmd>", 2) - self.dut.send_expect( + self.sut_node.send_expect("port stop 0", "testpmd>", 2) + self.sut_node.send_expect( "set macsec offload 0 on encrypt %s replay-protect %s" % (encrypt_rx, replay_rx), "testpmd>", ) - self.dut.send_expect("port start 0", "testpmd>", 2) + self.sut_node.send_expect("port start 0", "testpmd>", 2) def tx_set_macsec_offload(self, encrypt_tx, replay_tx): # tx port @@ -181,19 +181,19 @@ class TestMacsecForIxgbe(TestCase): def rx_set_macsec_various_param(self, pi, idx, an, pn, key): # rx port - self.dut.send_expect("set macsec sc rx 0 %s %s" % (self.mac0, pi), "testpmd>") - self.dut.send_expect( + self.sut_node.send_expect("set macsec sc rx 0 %s %s" % (self.mac0, pi), "testpmd>") + self.sut_node.send_expect( "set macsec sa rx 0 %s %s %s %s" % (idx, an, pn, key), "testpmd>" ) - self.dut.send_expect("set macsec sc tx 0 %s %s" % (self.mac1, pi), "testpmd>") - self.dut.send_expect( + self.sut_node.send_expect("set macsec sc tx 0 %s %s" % (self.mac1, pi), "testpmd>") + 
self.sut_node.send_expect( "set macsec sa tx 0 %s %s %s %s" % (idx, an, pn, key), "testpmd>" ) - self.dut.send_expect("set fwd rxonly", "testpmd>") - self.dut.send_expect("set promisc all on", "testpmd>") + self.sut_node.send_expect("set fwd rxonly", "testpmd>") + self.sut_node.send_expect("set promisc all on", "testpmd>") if self.ol_flags == 0: - self.dut.send_expect("set verbose 1", "testpmd>") - self.dut.send_expect("start", "testpmd>", 2) + self.sut_node.send_expect("set verbose 1", "testpmd>") + self.sut_node.send_expect("start", "testpmd>", 2) def tx_set_macsec_various_param(self, pi, idx, an, pn, key): # tx port @@ -215,8 +215,8 @@ class TestMacsecForIxgbe(TestCase): def packets_receive_num(self): time.sleep(0.1) self.session_sec.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) - out = self.dut.send_expect("show port stats 0", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>", 2) + out = self.sut_node.send_expect("show port stats 0", "testpmd>") packet_number = re.compile("RX-packets:\s+(.*?)\s+?").findall(out, re.S) return packet_number @@ -273,7 +273,7 @@ class TestMacsecForIxgbe(TestCase): "failed", ) self.session_sec.send_expect("quit", "#") - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") self.start_testpmd_rx() self.rx_set_macsec_offload("on", "on") @@ -297,7 +297,7 @@ class TestMacsecForIxgbe(TestCase): # subcase1:set various index on rx and tx port for i in [1, 2]: if i == 2: - result = self.dut.send_expect( + result = self.sut_node.send_expect( "set macsec sa rx 0 %s 0 0 00112200000000000000000000000000" % i, "testpmd>", ) @@ -316,7 +316,7 @@ class TestMacsecForIxgbe(TestCase): # subcase2:set various an on rx and tx port for i in range(1, 5): if i == 4: - result = self.dut.send_expect( + result = self.sut_node.send_expect( "set macsec sa rx 0 0 %s 0 00112200000000000000000000000000" % i, "testpmd>", ) @@ -332,7 +332,7 @@ class TestMacsecForIxgbe(TestCase): 
self.check_MACsec_pkts_receive() self.clear_port_xstats() self.session_sec.send_expect("quit", "#") - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") # subcase3:set various pn on rx and tx port for i in [ @@ -350,7 +350,7 @@ class TestMacsecForIxgbe(TestCase): self.start_testpmd_tx() self.tx_set_macsec_offload("on", "on") if i == "0x100000000": - result = self.dut.send_expect( + result = self.sut_node.send_expect( "set macsec sa rx 0 0 0 %s 00112200000000000000000000000000" % i, "testpmd>", ) @@ -372,7 +372,7 @@ class TestMacsecForIxgbe(TestCase): int(pkt_num[0]) == 3, "Rx port can't receive three pkts" ) self.session_sec.send_expect("quit", "#") - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") # subcase4:set various key on rx and tx port for i in [ @@ -387,7 +387,7 @@ class TestMacsecForIxgbe(TestCase): # subcase5:set various pi on rx and tx port for i in [1, "0xffff", "0x10000"]: if i == "0x10000": - result = self.dut.send_expect( + result = self.sut_node.send_expect( "set macsec sc rx 0 %s %s" % (self.mac0, i), "testpmd>" ) self.verify("Bad arguments" in result, "set pi to 0x10000 failed") @@ -409,13 +409,13 @@ class TestMacsecForIxgbe(TestCase): self.ol_flags = 0 # rx port self.start_testpmd_rx() - self.dut.send_expect("port stop 0", "testpmd>", 2) - self.dut.send_expect("set macsec offload 0 off", "testpmd>") - self.dut.send_expect("port start 0", "testpmd>", 2) - self.dut.send_expect("set fwd rxonly", "testpmd>") - self.dut.send_expect("set promisc all on", "testpmd>") - self.dut.send_expect("set verbose 1", "testpmd>") - self.dut.send_expect("start", "testpmd>", 2) + self.sut_node.send_expect("port stop 0", "testpmd>", 2) + self.sut_node.send_expect("set macsec offload 0 off", "testpmd>") + self.sut_node.send_expect("port start 0", "testpmd>", 2) + self.sut_node.send_expect("set fwd rxonly", "testpmd>") + self.sut_node.send_expect("set promisc all on", "testpmd>") + self.sut_node.send_expect("set verbose 
1", "testpmd>") + self.sut_node.send_expect("start", "testpmd>", 2) # tx port self.start_testpmd_tx() @@ -546,23 +546,23 @@ class TestMacsecForIxgbe(TestCase): is 0x88E5, and the packet length is 96bytes, while the normal packet length is 64bytes. """ - self.tester_itf_0 = self.tester.get_interface(self.dut_ports[0]) - self.tester_itf_1 = self.tester.get_interface(self.dut_ports[1]) + self.tg_itf_0 = self.tg_node.get_interface(self.sut_ports[0]) + self.tg_itf_1 = self.tg_node.get_interface(self.sut_ports[1]) self.start_testpmd_perf() # start tcpdump - self.tester.send_expect("rm -rf ./tcpdump_test.cap", "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf ./tcpdump_test.cap", "#") + self.tg_node.send_expect( "tcpdump -i %s ether src %s -w ./tcpdump_test.cap 2> /dev/null& " - % (self.tester_itf_0, self.mac0), + % (self.tg_itf_0, self.mac0), "#", ) - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() pkt = 'Ether(dst="%s", src="02:00:00:00:00:01")/IP()/UDP()/("X"*22)' % self.mac1 - p.append_pkt(pkt) - p.send_pkt(self.tester, tx_port=self.tester_itf_1, count=10, timeout=3) + scapy_pkt_builder.append_pkt(pkt) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf_1, count=10, timeout=3) # get tcpdump package - self.tester.send_expect("killall tcpdump", "#") - out = self.tester.send_expect( + self.tg_node.send_expect("killall tcpdump", "#") + out = self.tg_node.send_expect( "tcpdump -nn -e -v -r ./tcpdump_test.cap", "#", 120 ) self.verify( @@ -576,27 +576,27 @@ class TestMacsecForIxgbe(TestCase): """ self.table_header = ["Frame Size", "Mpps", "% linerate"] self.result_table_create(self.table_header) - txPort = self.tester.get_local_port(self.dut_ports[1]) - rxPort = self.tester.get_local_port(self.dut_ports[0]) + txPort = self.tg_node.get_local_port(self.sut_ports[1]) + rxPort = self.tg_node.get_local_port(self.sut_ports[0]) self.start_testpmd_perf() # prepare traffic generator input flow = ( 'Ether(dst="%s", 
src="02:00:00:00:00:01")/IP()/UDP()/("X"*22)' % self.mac1 ) pcap = os.sep.join([self.output_path, "test.pcap"]) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap, flow)) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap, flow)) + self.tg_node.scapy_execute() tgenInput = [] pcap = os.sep.join([self.output_path, "test.pcap"]) tgenInput.append((txPort, rxPort, pcap)) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) self.verify(pps > 0, "No traffic detected") pps /= 1000000.0 rate = (pps * 100) / self.wirespeed(self.nic, 96, 1) @@ -609,11 +609,11 @@ class TestMacsecForIxgbe(TestCase): Run after each test case. """ self.session_sec.send_expect("quit", "#") - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() - self.dut.close_session(self.session_sec) + self.sut_node.kill_all() + self.sut_node.close_session(self.session_sec) diff --git a/tests/TestSuite_malicious_driver_event_indication.py b/tests/TestSuite_malicious_driver_event_indication.py index 5863bf40..7608d71f 100644 --- a/tests/TestSuite_malicious_driver_event_indication.py +++ b/tests/TestSuite_malicious_driver_event_indication.py @@ -21,11 +21,11 @@ from framework.test_case import TestCase class TestSuiteMaliciousDrvEventIndication(TestCase): def d_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - return self.dut.send_expect(*_cmd) + return self.sut_node.send_expect(*_cmd) def d_a_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - return self.dut.alt_session.send_expect(*_cmd) + return self.sut_node.alt_session.send_expect(*_cmd) def vf_pmd_con(self, cmd): if not self.vf_pmd_session: @@ -38,9 +38,9 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): def target_dir(self): # get absolute directory of target source code target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir @@ -59,7 +59,7 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): ) self.d_a_con(cmd) # rebuild dpdk source code - self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) @contextmanager def restore_dpdk_compilation(self): @@ -79,16 +79,16 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): ) self.d_a_con(cmd) # rebuild dpdk source code - self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) def vf_create(self): port_id = 0 - port_obj = self.dut.ports_info[port_id]["port"] - self.dut.generate_sriov_vfs_by_port(port_id, 1) + port_obj = self.sut_node.ports_info[port_id]["port"] + 
self.sut_node.generate_sriov_vfs_by_port(port_id, 1) pf_pci = port_obj.pci - sriov_vfs_port = self.dut.ports_info[port_id].get("vfs_port") + sriov_vfs_port = self.sut_node.ports_info[port_id].get("vfs_port") if not sriov_vfs_port: - msg = "failed to create vf on dut port {}".format(pf_pci) + msg = "failed to create vf on SUT port {}".format(pf_pci) raise VerifyFailure(msg) for port in sriov_vfs_port: port.bind_driver(driver=self.drivername) @@ -104,13 +104,13 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): if not self.vf_ports_info: return for port_id, _ in self.vf_ports_info.items(): - self.dut.destroy_sriov_vfs_by_port(port_id) - port_obj = self.dut.ports_info[port_id]["port"] + self.sut_node.destroy_sriov_vfs_by_port(port_id) + port_obj = self.sut_node.ports_info[port_id]["port"] port_obj.bind_driver(self.drivername) self.vf_ports_info = None def init_pf_testpmd(self): - self.pf_testpmd = os.path.join(self.target_dir, self.dut.apps_name["test-pmd"]) + self.pf_testpmd = os.path.join(self.target_dir, self.sut_node.apps_name["test-pmd"]) def start_pf_testpmd(self): core_mask = utils.create_mask(self.pf_pmd_cores) @@ -126,7 +126,7 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): **{ "bin": self.pf_testpmd, "core_mask": core_mask, - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "whitelist": self.pf_pmd_whitelist, "prefix": "pf_pmd", } @@ -142,13 +142,13 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): self.is_pf_pmd_on = False def get_pf_testpmd_reponse(self): - output = self.dut.get_session_output(timeout=2) + output = self.sut_node.get_session_output(timeout=2) return output def init_vf_testpmd(self): self.vf_pmd_session_name = "vf_testpmd" - self.vf_pmd_session = self.dut.create_session(self.vf_pmd_session_name) - self.vf_pmdout = PmdOutput(self.dut, self.vf_pmd_session) + self.vf_pmd_session = self.sut_node.create_session(self.vf_pmd_session_name) + self.vf_pmdout = 
PmdOutput(self.sut_node, self.vf_pmd_session) def start_vf_testpmd(self): self.vf_pmdout.start_testpmd( @@ -240,8 +240,8 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): supported_drivers = ["i40e"] result = all( [ - self.dut.ports_info[index]["port"].default_driver in supported_drivers - for index in self.dut_ports + self.sut_node.ports_info[index]["port"].default_driver in supported_drivers + for index in self.sut_ports ] ) msg = "current nic <0> is not supported".format(self.nic) @@ -249,8 +249,8 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): def preset_pmd_res(self): # get whitelist and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - corelist = self.dut.get_core_list("1S/6C/1T", socket=socket)[2:] + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + corelist = self.sut_node.get_core_list("1S/6C/1T", socket=socket)[2:] self.pf_pmd_whitelist = "-a " + self.vf_ports_info[0].get("pf_pci") self.pf_pmd_cores = corelist[:2] self.vf_pmd_allowlst = "-a " + self.vf_ports_info[0].get("vfs_pci")[0] @@ -267,7 +267,7 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): with self.restore_dpdk_compilation(): self.vf_destroy() if self.vf_pmd_session: - self.dut.close_session(self.vf_pmd_session) + self.sut_node.close_session(self.vf_pmd_session) self.vf_pmd_session = None # @@ -277,8 +277,8 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Not enough ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Not enough ports") self.verify_supported_nic() # prepare testing environment self.preset_test_environment() @@ -303,10 +303,10 @@ class TestSuiteMaliciousDrvEventIndication(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.vf_destroy() if self.vf_pmd_session: - self.dut.close_session(self.vf_pmd_session) + self.sut_node.close_session(self.vf_pmd_session) self.vf_pmd_session = None def test_malicious_driver_event_detected(self): diff --git a/tests/TestSuite_mdd.py b/tests/TestSuite_mdd.py index 93cd9945..54557c92 100644 --- a/tests/TestSuite_mdd.py +++ b/tests/TestSuite_mdd.py @@ -11,8 +11,8 @@ Test the support of Malicious Driver Detection import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.virt_common import VM @@ -26,8 +26,8 @@ class TestMDD(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) > 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) > 1, "Insufficient ports") self.vm0 = None # set vf assign method and vf driver @@ -39,25 +39,25 @@ class TestMDD(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") - self.dut.send_expect("dmesg -c", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("dmesg -c", "#") self.port_id_0 = 0 self.port_id_1 = 1 - self.tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.rx_port = self.tester.get_local_port(self.dut_ports[1]) + self.tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.rx_port = self.tg_node.get_local_port(self.sut_ports[1]) def set_up(self): pass def setup_2pf_2vf_1vm_env(self): - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver="ixgbe") - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] - self.used_dut_port_1 = self.dut_ports[1] - 
self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 1, driver="ixgbe") - self.sriov_vfs_port_1 = self.dut.ports_info[self.used_dut_port_1]["vfs_port"] + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver="ixgbe") + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + self.used_sut_port_1 = self.sut_ports[1] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_1, 1, driver="ixgbe") + self.sriov_vfs_port_1 = self.sut_node.ports_info[self.used_sut_port_1]["vfs_port"] try: @@ -72,11 +72,11 @@ class TestMDD(TestCase): vf1_prop = {"opt_host": self.sriov_vfs_port_1[0].pci} # not support driver=igb_uio,because driver is kernel driver # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "mdd") + self.vm0 = VM(self.sut_node, "vm0", "mdd") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") except Exception as e: @@ -91,32 +91,32 @@ class TestMDD(TestCase): self.vm0_testpmd.execute_cmd("quit", "# ") self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() self.vm0 = None - self.dut.virt_exit() + self.sut_node.virt_exit() - if getattr(self, "used_dut_port_0", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) - port = self.dut.ports_info[self.used_dut_port_0]["port"] + if getattr(self, "used_sut_port_0", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) + port = self.sut_node.ports_info[self.used_sut_port_0]["port"] port.bind_driver() - self.used_dut_port_0 = None + self.used_sut_port_0 = None - if getattr(self, "used_dut_port_1", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_1) - port = 
self.dut.ports_info[self.used_dut_port_1]["port"] + if getattr(self, "used_sut_port_1", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_1) + port = self.sut_node.ports_info[self.used_sut_port_1]["port"] port.bind_driver() - self.used_dut_port_1 = None + self.used_sut_port_1 = None - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() def start_testpmd_in_vm(self, txoffload=""): - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd( VM_CORES_MASK, "--portmask=0x3 --tx-offloads=%s" % txoffload @@ -128,19 +128,19 @@ class TestMDD(TestCase): def send_packets(self): tgen_ports = [] - self.tester_intf = self.tester.get_interface(self.tx_port) + self.tg_intf = self.tg_node.get_interface(self.tx_port) tgen_ports.append((self.tx_port, self.rx_port)) self.pmd_vf0_mac = self.vm0_testpmd.get_port_mac(self.port_id_0) dst_mac = self.pmd_vf0_mac - src_mac = self.tester.get_mac(self.tx_port) + src_mac = self.tg_node.get_mac(self.tx_port) - pkt = Packet(pkt_type="UDP", pkt_len=64) - pkt.config_layer("ether", {"dst": dst_mac, "src": src_mac}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=64) + scapy_pkt_builder.config_layer("ether", {"dst": dst_mac, "src": src_mac}) time.sleep(2) self.vm0_testpmd.execute_cmd("clear port stats all") self.vm0_testpmd.execute_cmd("show port stats all") - pkt.send_pkt(self.tester, tx_port=self.tester_intf, count=send_pks_num) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf, count=send_pks_num) time.sleep(2) def result_verify(self, pkt_fwd=True): @@ -161,20 +161,20 @@ class TestMDD(TestCase): self.verify(vf1_tx_cnt == 0, "Packet is forwarded") def config_mdd(self, value): - 
self.dut.restore_interfaces() - self.dut.send_expect("rmmod ixgbe", "# ", 10) + self.sut_node.restore_interfaces() + self.sut_node.send_expect("rmmod ixgbe", "# ", 10) time.sleep(2) - count = self.dut.send_expect( + count = self.sut_node.send_expect( "./usertools/dpdk-devbind.py -s | grep ixgbe | wc -l", "#" ) m = [value for i in range(int(count))] mdd = "MDD=" + str(m).replace("[", "").replace("]", "").replace(" ", "") - self.dut.send_expect("modprobe ixgbe %s" % mdd, "# ", 10) + self.sut_node.send_expect("modprobe ixgbe %s" % mdd, "# ", 10) time.sleep(5) - for port_info in self.dut.ports_info: + for port_info in self.sut_node.ports_info: port = port_info["port"] intf = port.get_interface_name() - self.dut.send_expect("ifconfig %s up" % intf, "# ", 10) + self.sut_node.send_expect("ifconfig %s up" % intf, "# ", 10) time.sleep(2) def test_1enable_mdd_dpdk_disable(self): @@ -183,7 +183,7 @@ class TestMDD(TestCase): self.start_testpmd_in_vm(txoffload="0x1") self.send_packets() self.result_verify(False) - dmesg = self.dut.send_expect("dmesg -c |grep 'event'", "# ", 10) + dmesg = self.sut_node.send_expect("dmesg -c |grep 'event'", "# ", 10) self.verify("Malicious event" in dmesg, "mdd error") def test_2enable_mdd_dpdk_enable(self): @@ -192,7 +192,7 @@ class TestMDD(TestCase): self.start_testpmd_in_vm(txoffload="0x0") self.send_packets() self.result_verify(False) - dmesg = self.dut.send_expect("dmesg -c |grep 'event'", "# ", 10) + dmesg = self.sut_node.send_expect("dmesg -c |grep 'event'", "# ", 10) self.verify("Malicious event" in dmesg, "mdd error") def test_3disable_mdd_dpdk_disable(self): @@ -201,7 +201,7 @@ class TestMDD(TestCase): self.start_testpmd_in_vm(txoffload="0x1") self.send_packets() self.result_verify(True) - dmesg = self.dut.send_expect("dmesg -c |grep 'event'", "# ", 10) + dmesg = self.sut_node.send_expect("dmesg -c |grep 'event'", "# ", 10) self.verify("Malicious event" not in dmesg, "mdd error") def test_4disable_mdd_dpdk_enable(self): @@ -210,7 +210,7 
@@ class TestMDD(TestCase): self.start_testpmd_in_vm(txoffload="0x0") self.send_packets() self.result_verify(True) - dmesg = self.dut.send_expect("dmesg -c |grep 'event'", "# ", 10) + dmesg = self.sut_node.send_expect("dmesg -c |grep 'event'", "# ", 10) self.verify("Malicious event" not in dmesg, "mdd error") def tear_down(self): diff --git a/tests/TestSuite_metering_and_policing.py b/tests/TestSuite_metering_and_policing.py index 02a83181..836adfd9 100644 --- a/tests/TestSuite_metering_and_policing.py +++ b/tests/TestSuite_metering_and_policing.py @@ -13,10 +13,10 @@ import string import time import framework.utils as utils -from framework.dut import Dut from framework.plotting import Plotting from framework.pmd_output import PmdOutput from framework.settings import HEADER_SIZE +from framework.sut_node import SutNode from framework.test_case import TestCase @@ -24,30 +24,30 @@ class TestMeteringAndPolicing(TestCase): scapyCmds = [] def start_scapy(self): - self.tester.scapy_foreground() - self.tester.send_expect("scapy", ">>> ", 10) + self.tg_node.scapy_foreground() + self.tg_node.send_expect("scapy", ">>> ", 10) self.scapy_status = True def end_scapy(self): - self.tester.send_expect("exit()", "#") + self.tg_node.send_expect("exit()", "#") self.scapy_status = False def scapy_execute(self, timeout=60): for cmd in self.scapyCmds: - self.tester.send_expect(cmd, ">>> ", timeout) + self.tg_node.send_expect(cmd, ">>> ", timeout) self.scapyCmds = [] - def copy_config_files_to_dut(self): + def copy_config_files_to_sut(self): """ - Copy firmware.cli, dscp_*.sh from tester to DUT. + Copy firmware.cli, dscp_*.sh from TG to SUT. 
""" file = "meter_and_policy_config.tar.gz" src_file = r"./dep/%s" % file dst1 = "/tmp" dst2 = os.sep.join([self.target_dir, "drivers/net/softnic"]) - self.dut.session.copy_file_to(src_file, dst1) - self.dut.send_expect("tar xf %s/%s -C %s" % (dst1, file, dst2), "#", 30) + self.sut_node.session.copy_file_to(src_file, dst1) + self.sut_node.send_expect("tar xf %s/%s -C %s" % (dst1, file, dst2), "#", 30) def update_firmware_cli(self, caseID): """ @@ -57,7 +57,7 @@ class TestMeteringAndPolicing(TestCase): [self.target_dir, "drivers/net/softnic/meter_and_policing_firmware.cli"] ) - if len(self.dut_ports) == 4: + if len(self.sut_ports) == 4: self.ori_firmware_cli = os.sep.join( [ self.target_dir, @@ -65,44 +65,44 @@ class TestMeteringAndPolicing(TestCase): ] ) self.new_firmware_cli = "%s-%s" % (self.ori_firmware_cli, caseID) - self.dut.send_expect("rm -f %s" % self.new_firmware_cli, "#") - self.dut.send_expect( + self.sut_node.send_expect("rm -f %s" % self.new_firmware_cli, "#") + self.sut_node.send_expect( "cp %s %s" % (self.ori_firmware_cli, self.new_firmware_cli), "#" ) # link dev - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*link LINK0 dev.*$/link LINK0 dev %s/g' %s" - % (self.dut_p0_pci, self.new_firmware_cli), + % (self.sut_p0_pci, self.new_firmware_cli), "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*link LINK1 dev.*$/link LINK1 dev %s/g' %s" - % (self.dut_p1_pci, self.new_firmware_cli), + % (self.sut_p1_pci, self.new_firmware_cli), "#", ) - if len(self.dut_ports) == 4: - self.dut.send_expect( + if len(self.sut_ports) == 4: + self.sut_node.send_expect( "sed -i -e 's/^.*link LINK2 dev.*$/link LINK2 dev %s/g' %s" - % (self.dut_p2_pci, self.new_firmware_cli), + % (self.sut_p2_pci, self.new_firmware_cli), "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*link LINK3 dev.*$/link LINK3 dev %s/g' %s" - % (self.dut_p3_pci, self.new_firmware_cli), + % (self.sut_p3_pci, self.new_firmware_cli), 
"#", ) # table action temp = "table action profile AP0" if caseID == 8: - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*%s.*$/%s ipv6 offset 270 fwd meter trtcm tc 1 stats pkts/g' %s" % (temp, temp, self.new_firmware_cli), "#", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*%s.*$/%s ipv4 offset 270 fwd meter trtcm tc 1 stats pkts/g' %s" % (temp, temp, self.new_firmware_cli), "#", @@ -112,19 +112,19 @@ class TestMeteringAndPolicing(TestCase): temp = "pipeline RX table match" if caseID == 7: target = "hash ext key 16 mask 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF offset 278 buckets 16K size 65K action AP0" - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*%s.*$/%s %s/g' %s" % (temp, temp, target, self.new_firmware_cli), "#", ) elif caseID == 8: - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*%s.*$/%s acl ipv6 offset 270 size 4K action AP0/g' %s" % (temp, temp, self.new_firmware_cli), "#", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*%s.*$/%s acl ipv4 offset 270 size 4K action AP0/g' %s" % (temp, temp, self.new_firmware_cli), "#", @@ -133,39 +133,39 @@ class TestMeteringAndPolicing(TestCase): temp = "pipeline RX table 0 dscp" target_dir = "\/".join(self.target_dir.split("/")) if caseID == 10: - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*%s.*$/%s %s\/drivers\/net\/softnic\/dscp_red.sh/g' %s" % (temp, temp, target_dir, self.new_firmware_cli), "#", ) elif caseID == 11: - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*%s.*$/%s %s\/drivers\/net\/softnic\/dscp_yellow.sh/g' %s" % (temp, temp, target_dir, self.new_firmware_cli), "#", ) elif caseID == 12: - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/^.*%s.*$/%s %s\/drivers\/net\/softnic\/dscp_green.sh/g' %s" % (temp, temp, target_dir, self.new_firmware_cli), "#", ) elif caseID == 13: - self.dut.send_expect( + self.sut_node.send_expect( "sed -i 
-e 's/^.*%s.*$/%s %s\/drivers\/net\/softnic\/dscp_default.sh/g' %s" % (temp, temp, target_dir, self.new_firmware_cli), "#", ) # thread * pipeline RX/TX enable - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/thread 5 pipeline RX enable/thread %d pipeline RX enable/g' %s" - % (len(self.dut_ports), self.new_firmware_cli), + % (len(self.sut_ports), self.new_firmware_cli), "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/thread 5 pipeline TX enable/thread %d pipeline TX enable/g' %s" - % (len(self.dut_ports), self.new_firmware_cli), + % (len(self.sut_ports), self.new_firmware_cli), "#", ) @@ -173,15 +173,15 @@ class TestMeteringAndPolicing(TestCase): """ Start testpmd. """ - if len(self.dut_ports) == 2: + if len(self.sut_ports) == 2: portmask = "0x4" Corelist = [0, 1, 2] Servicecorelist = "0x4" - if len(self.dut_ports) == 4: + if len(self.sut_ports) == 4: portmask = "0x10" Corelist = [0, 1, 2, 3, 4] Servicecorelist = "0x10" - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.pmd_out.start_testpmd( Corelist, "--rxq=%d --txq=%d --portmask=%s --disable-rss" @@ -190,7 +190,7 @@ class TestMeteringAndPolicing(TestCase): % (Servicecorelist, filename), ) if self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"]: - self.dut.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") def add_port_meter_profile(self, profile_id, cbs=400, pbs=500): """ @@ -198,7 +198,7 @@ class TestMeteringAndPolicing(TestCase): """ cir = 3125000000 pir = 3125000000 - self.dut.send_expect( + self.sut_node.send_expect( "add port meter profile trtcm_rfc2698 %d %d %d %d %d %d 0" % (self.port_id, profile_id, cir, pir, cbs, pbs), "testpmd>", @@ -216,7 +216,7 @@ class TestMeteringAndPolicing(TestCase): if gyrd_action_list[i] != "drop": gyrd_action_list[i] = "color type " + gyrd_action_list[i] - self.dut.send_expect( + self.sut_node.send_expect( "add port meter policy %d %d 
g_actions %s / end y_actions %s / end r_actions %s / end" % ( port_id, @@ -232,7 +232,7 @@ class TestMeteringAndPolicing(TestCase): """ Create new meter object for the ethernet device. """ - self.dut.send_expect( + self.sut_node.send_expect( "create port meter %d %d %d %d yes %s" % (port_id, mtr_id, profile_id, policy_id, gyrd_action), "testpmd>", @@ -262,7 +262,7 @@ class TestMeteringAndPolicing(TestCase): if protocol == "sctp": proto_id = 132 - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create %d group 0 ingress pattern eth / %s proto mask 255 src mask %s dst mask" " %s src spec %s dst spec %s proto spec %d / %s src mask 65535 dst mask 65535 src " "spec %d dst spec %d / end actions meter mtr_id %d / queue index %d / end" @@ -292,10 +292,10 @@ class TestMeteringAndPolicing(TestCase): def scapy_send_packet(self, ip_ver, protocol, fwd_port, pktsize): """ - Send a packet to DUT port 0 + Send a packet to SUT port 0 """ - source_port = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + source_port = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) protocol = protocol.upper() if ip_ver == "ipv4": @@ -321,7 +321,7 @@ class TestMeteringAndPolicing(TestCase): self.scapyCmds.append( 'sendp([Ether(dst="%s")/%s(src="%s",dst="%s",%s)/%s(sport=%d,dport=%d)/Raw(load="P"*%d)], iface="%s")' % ( - self.dut_p0_mac, + self.sut_p0_mac, tag, src_ip, dst_ip, @@ -345,8 +345,8 @@ class TestMeteringAndPolicing(TestCase): rx_before = [] tx_before = [] - for i in range(0, len(self.dut_ports)): - output = self.dut.send_expect("show port stats %d" % (i), "testpmd>") + for i in range(0, len(self.sut_ports)): + output = self.sut_node.send_expect("show port stats %d" % (i), "testpmd>") if i == 0: rx_before.append( re.compile("RX-packets:\s+(.*?)\s+?").findall(output, re.S) @@ -359,8 +359,8 @@ class TestMeteringAndPolicing(TestCase): rx_after = [] tx_after = [] - for i in range(0, len(self.dut_ports)): - output = 
self.dut.send_expect("show port stats %d" % (i), "testpmd>") + for i in range(0, len(self.sut_ports)): + output = self.sut_node.send_expect("show port stats %d" % (i), "testpmd>") if i == 0: rx_after.append( re.compile("RX-packets:\s+(.*?)\s+?").findall(output, re.S) @@ -371,14 +371,14 @@ class TestMeteringAndPolicing(TestCase): tx_packets_port = [] temp1 = int(rx_after[0][0]) - int(rx_before[0][0]) rx_packets_port.append(temp1) - for i in range(0, len(self.dut_ports)): + for i in range(0, len(self.sut_ports)): temp2 = int(tx_after[i][0]) - int(tx_before[i][0]) tx_packets_port.append(temp2) self.verify( int(rx_packets_port[0]) == 1, "Wrong: port 0 did not recieve any packet" ) if expect_port == -1: - for i in range(0, len(self.dut_ports)): + for i in range(0, len(self.sut_ports)): self.verify( int(tx_packets_port[i]) == 0, "Wrong: the packet is not dropped" ) @@ -415,25 +415,25 @@ class TestMeteringAndPolicing(TestCase): """ # get absolute directory of target source code self.target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) - self.dut_ports = self.dut.get_ports() + self.sut_ports = self.sut_node.get_ports() self.port_nums = 2 self.verify( - len(self.dut_ports) >= self.port_nums, + len(self.sut_ports) >= self.port_nums, "Insufficient ports for speed testing", ) - self.dut_p0_pci = self.dut.get_port_pci(self.dut_ports[0]) - self.dut_p1_pci = self.dut.get_port_pci(self.dut_ports[1]) - if len(self.dut_ports) == 4: - self.dut_p2_pci = self.dut.get_port_pci(self.dut_ports[2]) - self.dut_p3_pci = self.dut.get_port_pci(self.dut_ports[3]) - self.pmd_out = PmdOutput(self.dut) - self.dut_p0_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.port_id = len(self.dut_ports) - self.copy_config_files_to_dut() + self.sut_p0_pci = self.sut_node.get_port_pci(self.sut_ports[0]) + self.sut_p1_pci = 
self.sut_node.get_port_pci(self.sut_ports[1]) + if len(self.sut_ports) == 4: + self.sut_p2_pci = self.sut_node.get_port_pci(self.sut_ports[2]) + self.sut_p3_pci = self.sut_node.get_port_pci(self.sut_ports[3]) + self.pmd_out = PmdOutput(self.sut_node) + self.sut_p0_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.port_id = len(self.sut_ports) + self.copy_config_files_to_sut() self.start_scapy() def set_up(self): @@ -470,7 +470,7 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=0, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "tcp", 2, pkt_list, [0, 0, 0, 0]) def test_ipv4_ACL_table_RFC2698_GYD(self): @@ -501,7 +501,7 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=0, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "tcp", 2, pkt_list, [-1, 0, 0, 0]) def test_ipv4_ACL_table_RFC2698_GDR(self): @@ -532,7 +532,7 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=1, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "sctp", 2, pkt_list, [1, -1, -1, 1]) def test_ipv4_ACL_table_RFC2698_DYR(self): @@ -564,7 +564,7 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=0, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "udp", 2, pkt_list, [0, 0, 0, -1]) def test_ipv4_ACL_table_RFC2698_DDD(self): @@ -595,7 +595,7 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=0, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "tcp", 2, pkt_list, [-1, -1, -1, -1]) def test_ipv4_with_same_cbs_and_pbs_GDR(self): @@ -626,7 +626,7 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=0, ) - 
self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "sctp", 2, pkt_list, [0, 0]) def test_ipv4_HASH_table_RFC2698(self): @@ -659,9 +659,9 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=0, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "tcp", 2, pkt_list, [0, 0, 0, 0]) - self.dut.send_expect("quit", "#", 30) + self.sut_node.send_expect("quit", "#", 30) # test 2 'g y d 0 0 0' self.start_testpmd(self.new_firmware_cli) @@ -684,9 +684,9 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=0, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "tcp", 2, pkt_list, [-1, 0, 0, 0]) - self.dut.send_expect("quit", "#", 30) + self.sut_node.send_expect("quit", "#", 30) # test 5 'd d d 0 0 0' self.start_testpmd(self.new_firmware_cli) @@ -709,9 +709,9 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=0, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "tcp", 2, pkt_list, [-1, -1, -1, -1]) - self.dut.send_expect("quit", "#", 30) + self.sut_node.send_expect("quit", "#", 30) # test 3 'g d r 0 0 0' pkt_list = self.run_param(cbs=cbs, pbs=pbs, head=32) @@ -735,9 +735,9 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=1, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.run_port_list("ipv4", "sctp", 2, pkt_list, [1, -1, -1, 1]) - self.dut.send_expect("quit", "#", 30) + self.sut_node.send_expect("quit", "#", 30) # test 4 'd y r 0 0 0' pkt_list = self.run_param(cbs=cbs, pbs=pbs, head=28) @@ -761,9 +761,9 @@ class TestMeteringAndPolicing(TestCase): mtr_id=0, queue_index_id=0, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") 
self.run_port_list("ipv4", "udp", 2, pkt_list, [0, 0, 0, -1]) - self.dut.send_expect("quit", "#", 30) + self.sut_node.send_expect("quit", "#", 30) def test_ipv6_ACL_table_RFC2698(self): """ @@ -810,7 +810,7 @@ class TestMeteringAndPolicing(TestCase): mtr_id=1, queue_index_id=1, ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") pkt_list = self.run_param(cbs=cbs, pbs=pbs, head=60) self.run_port_list("ipv6", "tcp", 2, pkt_list, [-1, 0, 0, 0]) @@ -858,7 +858,7 @@ class TestMeteringAndPolicing(TestCase): protocol="tcp", spec_id=i, mtr_id=i, - queue_index_id=i % len(self.dut_ports), + queue_index_id=i % len(self.sut_ports), ) self.create_flow_rule( ret_id=0, @@ -869,29 +869,29 @@ class TestMeteringAndPolicing(TestCase): queue_index_id=0, ) - self.dut.send_expect("start", "testpmd>") - output = self.dut.send_expect("flow list %d" % (self.port_id), "testpmd>") + self.sut_node.send_expect("start", "testpmd>") + output = self.sut_node.send_expect("flow list %d" % (self.port_id), "testpmd>") print(output) pkt_list = self.run_param(cbs=400, pbs=500, head=40) - if len(self.dut_ports) == 4: + if len(self.sut_ports) == 4: self.run_port_list("ipv4", "tcp", 0, pkt_list, [0, 0, 0, 0]) self.run_port_list("ipv4", "tcp", 1, pkt_list, [-1, 1, 1, 1]) self.run_port_list("ipv4", "tcp", 2, pkt_list, [2, -1, -1, 2]) self.run_port_list("ipv4", "tcp", 3, pkt_list, [3, 3, 3, -1]) - if len(self.dut_ports) == 2: + if len(self.sut_ports) == 2: self.run_port_list("ipv4", "tcp", 0, pkt_list, [0, 0, 0, 0]) self.run_port_list("ipv4", "tcp", 1, pkt_list, [-1, 1, 1, 1]) self.run_port_list("ipv4", "tcp", 2, pkt_list, [0, -1, -1, 0]) self.run_port_list("ipv4", "tcp", 3, pkt_list, [1, 1, 1, -1]) pkt_list = self.run_param(cbs=300, pbs=400, head=40) - if len(self.dut_ports) == 4: + if len(self.sut_ports) == 4: self.run_port_list("ipv4", "tcp", 4, pkt_list, [-1, 0, 0, 0]) self.run_port_list("ipv4", "tcp", 5, pkt_list, [1, -1, -1, 1]) self.run_port_list("ipv4", 
"tcp", 6, pkt_list, [2, 2, 2, -1]) self.run_port_list("ipv4", "tcp", 7, pkt_list, [-1, -1, -1, -1]) - if len(self.dut_ports) == 2: + if len(self.sut_ports) == 2: self.run_port_list("ipv4", "tcp", 4, pkt_list, [-1, 0, 0, 0]) self.run_port_list("ipv4", "tcp", 5, pkt_list, [1, -1, -1, 1]) self.run_port_list("ipv4", "tcp", 6, pkt_list, [0, 0, 0, -1]) @@ -907,7 +907,7 @@ class TestMeteringAndPolicing(TestCase): pkt_list = self.run_param(cbs=cbs, pbs=pbs, head=40) self.start_testpmd(self.new_firmware_cli) self.add_port_meter_profile(profile_id=0, cbs=cbs, pbs=pbs) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") # test 0: GYR self.add_port_meter_policy( @@ -1003,7 +1003,7 @@ class TestMeteringAndPolicing(TestCase): pkt_list = self.run_param(cbs=cbs, pbs=pbs, head=40) self.start_testpmd(self.new_firmware_cli) self.add_port_meter_profile(profile_id=0, cbs=cbs, pbs=pbs) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") # test 0: GYR self.add_port_meter_policy( @@ -1099,7 +1099,7 @@ class TestMeteringAndPolicing(TestCase): pkt_list = self.run_param(cbs=cbs, pbs=pbs, head=40) self.start_testpmd(self.new_firmware_cli) self.add_port_meter_profile(profile_id=0, cbs=cbs, pbs=pbs) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") # test 0: GYR self.add_port_meter_policy( @@ -1195,7 +1195,7 @@ class TestMeteringAndPolicing(TestCase): pkt_list = self.run_param(cbs=cbs, pbs=pbs, head=40) self.start_testpmd(self.new_firmware_cli) self.add_port_meter_profile(profile_id=0, cbs=cbs, pbs=pbs) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") # test 0: GYR self.add_port_meter_policy( @@ -1285,7 +1285,7 @@ class TestMeteringAndPolicing(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) def tear_down_all(self): diff --git a/tests/TestSuite_metrics.py b/tests/TestSuite_metrics.py index f3111ec6..ca3ce529 100644 --- a/tests/TestSuite_metrics.py +++ b/tests/TestSuite_metrics.py @@ -16,10 +16,9 @@ from pprint import pformat from framework.config import SuiteConf from framework.exception import VerifyFailure -from framework.packet import Packet -from framework.pktgen import TRANSMIT_CONT from framework.pmd_output import PmdOutput -from framework.settings import HEADER_SIZE +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.settings import HEADER_SIZE, TRANSMIT_CONT from framework.test_case import TestCase @@ -42,8 +41,8 @@ class TestMetrics(TestCase): def d_a_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - output = self.dut.alt_session.send_expect(*_cmd) - output2 = self.dut.alt_session.session.get_session_before(2) + output = self.sut_node.alt_session.send_expect(*_cmd) + output2 = self.sut_node.alt_session.session.get_session_before(2) return output + os.linesep + output2 def get_pkt_len(self, pkt_type, frame_size=64): @@ -61,12 +60,12 @@ class TestMetrics(TestCase): # create packet for send pkt_type = pkt_config.get("type") pkt_layers = pkt_config.get("pkt_layers") - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - self.logger.debug(pformat(pkt.pktgen.pkt.command())) + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + self.logger.debug(pformat(scapy_pkt_builder.scapy_pkt_util.pkt.command())) - return pkt.pktgen.pkt + return scapy_pkt_builder.scapy_pkt_util.pkt def add_stream_to_pktgen(self, txport, rxport, send_pkt, option): stream_ids = [] @@ -74,12 +73,12 @@ class TestMetrics(TestCase): _option = deepcopy(option) _option["pcap"] = pkt # link peer 0 - stream_id = 
self.tester.pktgen.add_stream(txport, rxport, pkt) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(txport, rxport, pkt) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) # link peer 1 - stream_id = self.tester.pktgen.add_stream(rxport, txport, pkt) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(rxport, txport, pkt) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) return stream_ids @@ -89,8 +88,8 @@ class TestMetrics(TestCase): rate = option.get("rate", float(100)) send_pkt = option.get("stream") # clear streams before add new streams - self.tester.pktgen.clear_streams() - # attach streams to pktgen + self.tg_node.perf_tg.clear_streams() + # attach streams to traffic generator stream_option = { "stream_config": { "txmode": {}, @@ -99,19 +98,19 @@ class TestMetrics(TestCase): } } stream_ids = self.add_stream_to_pktgen(txport, rxport, send_pkt, stream_option) - # run pktgen traffic + # run traffic generator traffic traffic_opt = option.get("traffic_opt") self.logger.debug(traffic_opt) - result = self.tester.pktgen.measure(stream_ids, traffic_opt) + result = self.tg_node.perf_tg.measure(stream_ids, traffic_opt) return result def run_traffic(self, option): - tester_tx_port_id = self.tester.get_local_port(self.dut_ports[0]) - tester_rx_port_id = self.tester.get_local_port(self.dut_ports[1]) + tg_tx_port_id = self.tg_node.get_local_port(self.sut_ports[0]) + tg_rx_port_id = self.tg_node.get_local_port(self.sut_ports[1]) ports_topo = { - "tx_intf": tester_tx_port_id, - "rx_intf": tester_rx_port_id, + "tx_intf": tg_tx_port_id, + "rx_intf": tg_rx_port_id, "stream": option.get("stream"), "rate": option.get("rate") or 100.0, "traffic_opt": option.get("traffic_opt"), @@ -122,7 +121,7 @@ class TestMetrics(TestCase): return result def init_testpmd(self): - self.testpmd = PmdOutput(self.dut) + 
self.testpmd = PmdOutput(self.sut_node) def start_testpmd(self, mode): table = { @@ -147,7 +146,7 @@ class TestMetrics(TestCase): def init_proc_info_tool(self): option = f" -v --file-prefix={self.prefix} -- --metrics" - app_name = self.dut.apps_name["proc-info"] + app_name = self.sut_node.apps_name["proc-info"] self.dpdk_proc = os.path.join(app_name + option) self.metrics_stat = [] @@ -305,7 +304,7 @@ class TestMetrics(TestCase): metrics_bps_in = 0 metrics_bps_out = 0 for index, result in enumerate(metrics_stats): - for port_id in self.dut_ports: + for port_id in self.sut_ports: metrics_bps_in += result.get(port_id).get("mean_bits_in") metrics_bps_out += result.get(port_id).get("mean_bits_out") mean_metrics_bps_in = metrics_bps_in / (index + 1) @@ -318,7 +317,7 @@ class TestMetrics(TestCase): def check_metrics_data_after_stop_traffic(self, data): # check mean_bits, it should be zero - for port_id in self.dut_ports: + for port_id in self.sut_ports: for result in data: metrics_bps_in = result.get(port_id).get("mean_bits_in") metrics_bps_out = result.get(port_id).get("mean_bits_out") @@ -326,7 +325,7 @@ class TestMetrics(TestCase): msg = "mean_bits bps is not cleared as exepected" raise VerifyFailure(msg) # check peak_bits, it should be the same - for port_id in self.dut_ports: + for port_id in self.sut_ports: peak_bits_in = [] peak_bits_out = [] for result in data: @@ -336,7 +335,7 @@ class TestMetrics(TestCase): msg = "peak_bits bps is not keep the maximum value" raise VerifyFailure(msg) # check ewma_bits, it should decrease step by step - for port_id in self.dut_ports: + for port_id in self.sut_ports: for key in ["ewma_bits_in", "ewma_bits_out"]: ewma_bits = [] for result in data: @@ -358,15 +357,15 @@ class TestMetrics(TestCase): "values": [list(test_content.values())], } self.display_suite_result(test_cfg) - # display pktgen bit rate statistics on traffic - self.logger.info("pktgen bit rate statistics:") + # display traffic generator bit rate statistics on 
traffic + self.logger.info("traffic generator bit rate statistics:") pktgen_results = data.get("pktgen_stats_on_traffic") self.display_metrics_bit_rate(pktgen_results) # display metrics bit rate statistics on traffic self.logger.info("dpdk metrics bit rate statistics on traffic:") metrics_results = data.get("metrics_stats_on_traffic") self.display_metrics_bit_rate(metrics_results) - # check bit rate bias between packet generator and dpdk metircs + # check bit rate bias between traffic generator and dpdk metircs in_bias, out_bias = self.calculate_bit_rate_deviation( pktgen_results, metrics_results ) @@ -376,7 +375,7 @@ class TestMetrics(TestCase): self.logger.info(msg) if in_bias > bias or out_bias > bias: msg = ( - "metrics mean_bits bps has more than {} bias" "compared with pktgen bps" + "metrics mean_bits bps has more than {} bias" "compared with traffic generator bps" ).format(bias) raise VerifyFailure(msg) # display dpdk metrics bit rate statistics after stop traffic @@ -462,8 +461,8 @@ class TestMetrics(TestCase): "values": [list(test_content.values())], } self.display_suite_result(test_cfg) - # display pktgen bit rate statistics on traffic - self.logger.info("pktgen bit rate statistics :") + # display traffic generator bit rate statistics on traffic + self.logger.info("traffic generator bit rate statistics :") pktgen_results = data.get("pktgen_stats_on_traffic") self.display_metrics_bit_rate(pktgen_results) pktgen_bps = max( @@ -474,7 +473,7 @@ class TestMetrics(TestCase): metrics_results = data.get("metrics_stats_after_traffic_stop") self.display_metrics_bit_rate(metrics_results) metrics_peak_data = {} - for port_id in self.dut_ports: + for port_id in self.sut_ports: metrics_peak_data[port_id] = { "peak_bits_in": max( [ @@ -494,7 +493,7 @@ class TestMetrics(TestCase): def check_bit_rate_peak_data(self, data): """ check ``peak_bits_in/peak_bits_out`` should keep the first max value - when packet generator work with decreasing traffic rate percent. 
+ when traffic generator work with decreasing traffic rate percent. """ pktgen_stats = [] metrics_stats = [] @@ -510,7 +509,7 @@ class TestMetrics(TestCase): msg = "traffic rate percent does not run with decreasing rate percent" self.verify(all(status), msg) # check ``peak_bits_in/peak_bits_out`` keep the first max value - for port_id in self.dut_ports: + for port_id in self.sut_ports: for key in ["peak_bits_in", "peak_bits_out"]: peak_values = [ metrics_stat.get(port_id).get(key) for metrics_stat in metrics_stats @@ -601,7 +600,7 @@ class TestMetrics(TestCase): def check_one_latecny_data(self, data): """ - packet generator calculates line latency between tx port and rx port, + traffic generator calculates line latency between tx port and rx port, dpdk metrics calculates packet forward latency between rx and tx inside testpmd. These two types latency data are used for different purposes. """ @@ -612,8 +611,8 @@ class TestMetrics(TestCase): "values": [list(test_content.values())], } self.display_suite_result(test_cfg) - # display pktgen latency statistics on traffic - self.logger.info("pktgen line latency statistics :") + # display traffic generator latency statistics on traffic + self.logger.info("traffic generator line latency statistics :") pktgen_results = data.get("pktgen_stats_on_traffic") self.display_metrics_latency([pktgen_results]) # check if the value is reasonable, no reference data @@ -622,13 +621,13 @@ class TestMetrics(TestCase): min_value = value.get("min") average = value.get("average") if max_value == 0 and average == 0 and min_value == 0: - msg = "failed to get pktgen latency data" + msg = "failed to get traffic generator latency data" raise VerifyFailure(msg) continue if max_value > average and average > min_value and min_value > 0: continue msg = ( - "pktgen latency is wrong: " "max <{0}> " "average <{1}> " "min <{2}>" + "traffic generator latency is wrong: " "max <{0}> " "average <{1}> " "min <{2}>" ).format(max_value, average, min_value) raise 
VerifyFailure(msg) # display dpdk metrics latency statistics @@ -735,8 +734,8 @@ class TestMetrics(TestCase): supported_drivers = ["ixgbe"] result = all( [ - self.dut.ports_info[port_id]["port"].default_driver in supported_drivers - for port_id in self.dut_ports + self.sut_node.ports_info[port_id]["port"].default_driver in supported_drivers + for port_id in self.sut_ports ] ) msg = "current nic is not supported" @@ -781,9 +780,9 @@ class TestMetrics(TestCase): # def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Not enough ports") - self.prefix = "dpdk_" + self.dut.prefix_subfix + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Not enough ports") + self.prefix = "dpdk_" + self.sut_node.prefix_subfix # prepare testing environment self.preset_test_environment() @@ -797,7 +796,7 @@ class TestMetrics(TestCase): def tear_down(self): """Run after each test case.""" - self.dut.kill_all() + self.sut_node.kill_all() def test_perf_bit_rate_peak(self): """ diff --git a/tests/TestSuite_mtu_update.py b/tests/TestSuite_mtu_update.py index 137abf58..39584e64 100644 --- a/tests/TestSuite_mtu_update.py +++ b/tests/TestSuite_mtu_update.py @@ -15,11 +15,8 @@ from time import sleep from typing import List, Tuple import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import TRANSMIT_CONT -from framework.pktgen_base import TRANSMIT_S_BURST from framework.pmd_output import PmdOutput -from framework.settings import HEADER_SIZE +from framework.settings import HEADER_SIZE, TRANSMIT_CONT, TRANSMIT_S_BURST from framework.test_case import TestCase ETHER_HEADER_LEN = 18 @@ -42,15 +39,15 @@ class TestMtuUpdate(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ When the case of this test suite finished, the environment should clear up. 
""" - self.tester.send_expect( - f"ifconfig {self.tester.get_interface(self.tester.get_local_port(self.rx_port))} " + self.tg_node.send_expect( + f"ifconfig {self.tg_node.get_interface(self.tg_node.get_local_port(self.rx_port))} " + f"mtu {ETHER_STANDARD_MTU}", "# ", ) @@ -60,19 +57,19 @@ class TestMtuUpdate(TestCase): """ An abstraction to remove repeated code throughout the subclasses of this class """ - return self.dut.send_expect(command, "testpmd>") + return self.sut_node.send_expect(command, "testpmd>") def get_mac_address_for_port(self, port_id: int) -> str: - return self.dut.get_mac_address(port_id) + return self.sut_node.get_mac_address(port_id) def send_scapy_packet(self, port_id: int, packet: str): - itf = self.tester.get_interface(self.tester.get_local_port(port_id)) + itf = self.tg_node.get_interface(self.tg_node.get_local_port(port_id)) - self.tester.scapy_foreground() - mac = self.dut.get_mac_address(port_id) - self.tester.scapy_append(f'dutmac="{mac}"') - self.tester.scapy_append(f'sendp({packet}, iface="{itf}")') - return self.tester.scapy_execute() + self.tg_node.scapy_foreground() + mac = self.sut_node.get_mac_address(port_id) + self.tg_node.scapy_append(f'sutmac="{mac}"') + self.tg_node.scapy_append(f'sendp({packet}, iface="{itf}")') + return self.tg_node.scapy_execute() def send_packet_of_size_to_port(self, port_id: int, pktsize: int): @@ -87,7 +84,7 @@ class TestMtuUpdate(TestCase): padding = max_pktlen - IP_HEADER_LEN - ETHER_HEADER_LEN out = self.send_scapy_packet( port_id, - f'Ether(dst=dutmac, src="52:00:00:00:00:00")/IP()/Raw(load="\x50"*{padding})', + f'Ether(dst=sutmac, src="52:00:00:00:00:00")/IP()/Raw(load="\x50"*{padding})', ) return out @@ -144,48 +141,48 @@ class TestMtuUpdate(TestCase): """ Prerequisite steps for each test suit. 
""" - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.rx_port = self.dut_ports[0] - self.tx_port = self.dut_ports[1] + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.rx_port = self.sut_ports[0] + self.tx_port = self.sut_ports[1] - cores = self.dut.get_core_list("1S/2C/1T") + cores = self.sut_node.get_core_list("1S/2C/1T") self.coremask = utils.create_mask(cores) self.port_mask = utils.create_mask([self.rx_port, self.tx_port]) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.set_mtu(ETHER_JUMBO_FRAME_MTU + 200) def set_up(self): """ This is to clear up environment before the case run. """ - self.dut.kill_all() + self.sut_node.kill_all() - def admin_tester_port(self, local_port, status): + def admin_tg_port(self, local_port, status): """ Do some operations to the network interface port, such as "up" or "down". """ - if self.tester.get_os_type() == "freebsd": - self.tester.admin_ports(local_port, status) + if self.tg_node.get_os_type() == "freebsd": + self.tg_node.admin_ports(local_port, status) else: - eth = self.tester.get_interface(local_port) - self.tester.admin_ports_linux(eth, status) + eth = self.tg_node.get_interface(local_port) + self.tg_node.admin_ports_linux(eth, status) time.sleep(10) def set_mtu(self, mtu) -> None: """ - A function which sets the MTU of the ports on the tester to a provided value. - This function is primarily used to make sure that the tester will + A function which sets the MTU of the ports on the TG to a provided value. + This function is primarily used to make sure that the TG will always be able to send packets that are larger than a standard mtu while testing. 
@param mtu: The desired MTU for local ports @return: None """ - self.admin_tester_port(self.tester.get_local_port(self.tx_port), f"mtu {mtu:d}") - self.admin_tester_port(self.tester.get_local_port(self.rx_port), f"mtu {mtu:d}") + self.admin_tg_port(self.tg_node.get_local_port(self.tx_port), f"mtu {mtu:d}") + self.admin_tg_port(self.tg_node.get_local_port(self.rx_port), f"mtu {mtu:d}") def helper_test_mut_checks(self, packet_size): """ @@ -219,7 +216,7 @@ class TestMtuUpdate(TestCase): self.exec("port start all") self.exec("set fwd mac") self.exec("start") - self.pmdout.wait_link_status_up(self.dut_ports[0]) + self.pmdout.wait_link_status_up(self.sut_ports[0]) """ On 1G NICs, when the jubmo frame MTU set > 1500, the software adjust it to MTU+4. """ diff --git a/tests/TestSuite_multicast.py b/tests/TestSuite_multicast.py index f8a8e727..91c16b49 100644 --- a/tests/TestSuite_multicast.py +++ b/tests/TestSuite_multicast.py @@ -41,29 +41,29 @@ class TestMulticast(TestCase): Run at the start of each test suite. 
Multicast Prerequisites """ - global dutPorts + global sutPorts # Based on h/w type, choose how many ports to use - dutPorts = self.dut.get_ports(self.nic) + sutPorts = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(dutPorts) >= 4, "Insufficient ports for testing") + self.verify(len(sutPorts) >= 4, "Insufficient ports for testing") # Verify that enough threads are available - cores = self.dut.get_core_list("1S/2C/2T") + cores = self.sut_node.get_core_list("1S/2C/2T") self.verify(cores is not None, "Insufficient cores for speed testing") global P1, P3, TG0, TG1, TGs # prepare port mapping TG0<=>P1, TG1<=>P3 - P1 = dutPorts[0] - P2 = dutPorts[1] - P3 = dutPorts[2] - P4 = dutPorts[3] + P1 = sutPorts[0] + P2 = sutPorts[1] + P3 = sutPorts[2] + P4 = sutPorts[3] TGs = [P1, P3] - TG0 = self.tester.get_local_port(TGs[0]) - TG1 = self.tester.get_local_port(TGs[1]) + TG0 = self.tg_node.get_local_port(TGs[0]) + TG1 = self.tg_node.get_local_port(TGs[1]) # make application - out = self.dut.build_dpdk_apps("examples/ipv4_multicast") - self.app_ipv4_multicast_path = self.dut.apps_name["ipv4_multicast"] + out = self.sut_node.build_dpdk_apps("examples/ipv4_multicast") + self.app_ipv4_multicast_path = self.sut_node.apps_name["ipv4_multicast"] self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -77,9 +77,9 @@ class TestMulticast(TestCase): """ IP4 Multicast Forwarding F1~F6 """ - eal_para = self.dut.create_eal_parameters(cores="1S/2C/1T") + eal_para = self.sut_node.create_eal_parameters(cores="1S/2C/1T") payload = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - self.dut.send_expect( + self.sut_node.send_expect( "%s %s -- -p %s -q 2" % (self.app_ipv4_multicast_path, eal_para, "0x5"), "IPv4_MULTICAST:", 60, @@ -90,14 +90,14 @@ class TestMulticast(TestCase): for rx_port in trafficFlow[flow][1].split(","): sniff_src = "not 00:00:00:00:00:00" - inst = self.tester.tcpdump_sniff_packets( - 
intf=self.tester.get_interface(eval(rx_port)), + inst = self.tg_node.tcpdump_sniff_packets( + intf=self.tg_node.get_interface(eval(rx_port)), count=1, filters=[{"layer": "ether", "config": {"src": sniff_src}}], ) - dmac = self.dut.get_mac_address(TGs[int(trafficFlow[flow][0][2])]) + dmac = self.sut_node.get_mac_address(TGs[int(trafficFlow[flow][0][2])]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(src="00:00:00:00:00:00", dst="%s")/IP(dst="%s",src="%s")\ /Raw(load="%s")], iface="%s")' % ( @@ -105,13 +105,13 @@ class TestMulticast(TestCase): trafficFlow[flow][3], trafficFlow[flow][2], payload, - self.tester.get_interface(eval(tx_port)), + self.tg_node.get_interface(eval(tx_port)), ) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(5) # Wait for the sniffer to finish. - pkts = self.tester.load_tcpdump_sniff_packets(inst) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst) for i in range(len(pkts)): result = str(pkts[i].show) @@ -136,16 +136,16 @@ class TestMulticast(TestCase): "dst=%s" % expectedMac in result, "Wrong MAC address" ) - self.dut.send_expect("^C", "#") + self.sut_node.send_expect("^C", "#") def tear_down(self): """ Run after each test case. """ - self.dut.send_expect("^C", "#") + self.sut_node.send_expect("^C", "#") def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_multiple_pthread.py b/tests/TestSuite_multiple_pthread.py index 253d6a4a..a68e2ba7 100644 --- a/tests/TestSuite_multiple_pthread.py +++ b/tests/TestSuite_multiple_pthread.py @@ -9,7 +9,7 @@ import string import time import framework.utils as utils -from framework.packet import Packet +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -19,21 +19,21 @@ class TestMultiplePthread(TestCase): Run at the start of each test suite. 
""" self.verify( - self.dut.get_os_type() == "linux", + self.sut_node.get_os_type() == "linux", "Test suite currently only supports Linux platforms", ) - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) global valports - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] # Verify that enough ports are available self.verify(len(valports) >= 1, "Insufficient ports for testing") # get socket and cores - self.socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores = self.dut.get_core_list("1S/8C/1T", socket=self.socket) + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores = self.sut_node.get_core_list("1S/8C/1T", socket=self.socket) self.cores.sort(key=lambda i: int(i)) self.verify(self.cores is not None, "Requested 8 cores failed") self.out_view = {"header": [], "data": []} - self.path = self.dut.apps_name["test-pmd"].rstrip() + self.path = self.sut_node.apps_name["test-pmd"].rstrip() def set_up(self): """ @@ -46,18 +46,18 @@ class TestMultiplePthread(TestCase): Send packets continuous. """ for index in valports: - localPort = self.tester.get_local_port(index) - iface = self.tester.get_interface(localPort) + localPort = self.tg_node.get_local_port(index) + iface = self.tg_node.get_interface(localPort) pcap_str = 'Ether()/IP(src="1.2.3.4", dst="192.168.0.%d")' % (index) - self.pkt = Packet(pcap_str) - intf = self.pkt.send_pkt_bg(crb=self.tester, tx_port=iface) + self.scapy_pkt_builder = ScapyPacketBuilder(pcap_str) + intf = self.scapy_pkt_builder.send_pkt_bg(node=self.tg_node, tx_port=iface) self.send_sessions.append(intf) def get_cores_statistic(self, cmdline): """ Get cpu and thread statistics. 
""" - mutiple_pthread_session = self.dut.new_session() + mutiple_pthread_session = self.sut_node.new_session() testpmd_name = self.path.split("/")[-1] out = mutiple_pthread_session.send_expect( f"ps -C {testpmd_name} -L -opid,tid,%cpu,psr,args", "#", 20 @@ -65,7 +65,7 @@ class TestMultiplePthread(TestCase): m = cmdline.replace('"', "", 2) out_list = out.split(m) mutiple_pthread_session.send_expect("^C", "#") - self.dut.close_session(mutiple_pthread_session) + self.sut_node.close_session(mutiple_pthread_session) return out_list def verify_before_send_packets(self, out_list): @@ -119,7 +119,7 @@ class TestMultiplePthread(TestCase): else: cmdline = './%s --lcores="%s" -n 4 -- -i' % (self.path, lcores) # start application - self.dut.send_expect(cmdline, "testpmd", 60) + self.sut_node.send_expect(cmdline, "testpmd", 60) out_list = self.get_cores_statistic(cmdline) self.verify_before_send_packets(out_list) @@ -129,9 +129,9 @@ class TestMultiplePthread(TestCase): for cpu in cpu_list: m += "%s," % cpu setline = "set corelist %s" % m[:-1] - self.dut.send_expect(setline, "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - out = self.dut.send_expect("show config fwd", "testpmd> ") + self.sut_node.send_expect(setline, "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + out = self.sut_node.send_expect("show config fwd", "testpmd> ") # check fwd config if len(cpu_list) >= 2: for core in cpu_list[:2]: @@ -148,8 +148,8 @@ class TestMultiplePthread(TestCase): out_list = self.get_cores_statistic(cmdline) self.verify_after_send_packets(out_list, lcore_list) # quit application - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def test_basic_operation(self): """ @@ -248,7 +248,7 @@ class TestMultiplePthread(TestCase): ] cmdline = random.sample(cmdline_list, 1) - out = self.dut.send_expect(cmdline[0] % self.path, "#", 60) + out = 
self.sut_node.send_expect(cmdline[0] % self.path, "#", 60) self.verify("invalid parameter" in out, "it's a valid parameter") def tear_down(self): @@ -257,8 +257,8 @@ class TestMultiplePthread(TestCase): """ if len(self.send_sessions) != 0: for session in self.send_sessions: - self.pkt.stop_send_pkt_bg(session) - self.dut.kill_all() + self.scapy_pkt_builder.stop_send_pkt_bg(session) + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_multiprocess.py b/tests/TestSuite_multiprocess.py index 62574bcb..a338c0b5 100644 --- a/tests/TestSuite_multiprocess.py +++ b/tests/TestSuite_multiprocess.py @@ -13,8 +13,8 @@ import time import framework.utils as utils executions = [] -from framework.pktgen import PacketGeneratorHelper from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestMultiprocess(TestCase): @@ -25,20 +25,20 @@ class TestMultiprocess(TestCase): Multiprocess prerequisites. Requirements: OS is not freeBSD - DUT core number >= 4 + SUT core number >= 4 multi_process build pass """ # self.verify('bsdapp' not in self.target, "Multiprocess not support freebsd") - self.verify(len(self.dut.get_all_cores()) >= 4, "Not enough Cores") - self.dut_ports = self.dut.get_ports() - self.socket = self.dut.get_numa_id(self.dut_ports[0]) + self.verify(len(self.sut_node.get_all_cores()) >= 4, "Not enough Cores") + self.sut_ports = self.sut_node.get_ports() + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) extra_option = "-Dexamples='multi_process/client_server_mp/mp_server,multi_process/client_server_mp/mp_client,multi_process/simple_mp,multi_process/symmetric_mp'" - self.dut.build_install_dpdk(target=self.target, extra_options=extra_option) - self.app_mp_client = self.dut.apps_name["mp_client"] - self.app_mp_server = self.dut.apps_name["mp_server"] - self.app_simple_mp = self.dut.apps_name["simple_mp"] - self.app_symmetric_mp = self.dut.apps_name["symmetric_mp"] + 
self.sut_node.build_install_dpdk(target=self.target, extra_options=extra_option) + self.app_mp_client = self.sut_node.apps_name["mp_client"] + self.app_mp_server = self.sut_node.apps_name["mp_server"] + self.app_simple_mp = self.sut_node.apps_name["simple_mp"] + self.app_symmetric_mp = self.sut_node.apps_name["symmetric_mp"] executions.append({"nprocs": 1, "cores": "1S/1C/1T", "pps": 0}) executions.append({"nprocs": 2, "cores": "1S/1C/2T", "pps": 0}) @@ -48,12 +48,12 @@ class TestMultiprocess(TestCase): executions.append({"nprocs": 8, "cores": "1S/4C/2T", "pps": 0}) self.eal_param = "" - for i in self.dut_ports: - self.eal_param += " -a %s" % self.dut.ports_info[i]["pci"] + for i in self.sut_ports: + self.eal_param += " -a %s" % self.sut_node.ports_info[i]["pci"] - self.eal_para = self.dut.create_eal_parameters(cores="1S/2C/1T") + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/2C/1T") # start new session to run secondary - self.session_secondary = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() # get dts output path if self.logger.log_path.startswith(os.sep): @@ -62,7 +62,7 @@ class TestMultiprocess(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -75,9 +75,9 @@ class TestMultiprocess(TestCase): Basic operation. 
""" # Send message from secondary to primary - cores = self.dut.get_core_list("1S/2C/1T", socket=self.socket) + cores = self.sut_node.get_core_list("1S/2C/1T", socket=self.socket) coremask = utils.create_mask(cores) - self.dut.send_expect( + self.sut_node.send_expect( self.app_simple_mp + " %s --proc-type=primary" % (self.eal_para), "Finished Process Init", 100, @@ -91,14 +91,14 @@ class TestMultiprocess(TestCase): ) self.session_secondary.send_expect("send hello_primary", ">") - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.session_secondary.send_expect("quit", "# ") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify( "Received 'hello_primary'" in out, "Message not received on primary process" ) # Send message from primary to secondary - cores = self.dut.get_core_list("1S/2C/1T", socket=self.socket) + cores = self.sut_node.get_core_list("1S/2C/1T", socket=self.socket) coremask = utils.create_mask(cores) self.session_secondary.send_expect( self.app_simple_mp + " %s --proc-type=primary " % (self.eal_para), @@ -107,15 +107,15 @@ class TestMultiprocess(TestCase): ) time.sleep(20) coremask = hex(int(coremask, 16) * 0x10).rstrip("L") - self.dut.send_expect( + self.sut_node.send_expect( self.app_simple_mp + " %s --proc-type=secondary" % (self.eal_para), "Finished Process Init", 100, ) self.session_secondary.send_expect("send hello_secondary", ">") - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.session_secondary.send_expect("quit", "# ") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify( "Received 'hello_secondary'" in out, @@ -127,7 +127,7 @@ class TestMultiprocess(TestCase): Load test of Simple MP application. 
""" - cores = self.dut.get_core_list("1S/2C/1T", socket=self.socket) + cores = self.sut_node.get_core_list("1S/2C/1T", socket=self.socket) coremask = utils.create_mask(cores) self.session_secondary.send_expect( self.app_simple_mp + " %s --proc-type=primary" % (self.eal_para), @@ -136,7 +136,7 @@ class TestMultiprocess(TestCase): ) time.sleep(20) coremask = hex(int(coremask, 16) * 0x10).rstrip("L") - self.dut.send_expect( + self.sut_node.send_expect( self.app_simple_mp + " %s --proc-type=secondary" % (self.eal_para), "Finished Process Init", 100, @@ -144,13 +144,13 @@ class TestMultiprocess(TestCase): stringsSent = 0 for line in open("/usr/share/dict/words", "r").readlines(): line = line.split("\n")[0] - self.dut.send_expect("send %s" % line, ">") + self.sut_node.send_expect("send %s" % line, ">") stringsSent += 1 if stringsSent == 3: break time.sleep(5) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.session_secondary.send_expect("quit", "# ") def test_multiprocess_simple_mpapplicationstartup(self): @@ -159,9 +159,9 @@ class TestMultiprocess(TestCase): """ # Send message from secondary to primary (auto process type) - cores = self.dut.get_core_list("1S/2C/1T", socket=self.socket) + cores = self.sut_node.get_core_list("1S/2C/1T", socket=self.socket) coremask = utils.create_mask(cores) - out = self.dut.send_expect( + out = self.sut_node.send_expect( self.app_simple_mp + " %s --proc-type=auto " % (self.eal_para), "Finished Process Init", 100, @@ -183,15 +183,15 @@ class TestMultiprocess(TestCase): ) self.session_secondary.send_expect("send hello_primary", ">") - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.session_secondary.send_expect("quit", "# ") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify( "Received 'hello_primary'" in out, "Message not received on primary process" ) # Send message from primary to secondary (auto process type) - cores = 
self.dut.get_core_list("1S/2C/1T", socket=self.socket) + cores = self.sut_node.get_core_list("1S/2C/1T", socket=self.socket) coremask = utils.create_mask(cores) out = self.session_secondary.send_expect( self.app_simple_mp + " %s --proc-type=auto" % (self.eal_para), @@ -204,7 +204,7 @@ class TestMultiprocess(TestCase): ) time.sleep(20) coremask = hex(int(coremask, 16) * 0x10).rstrip("L") - out = self.dut.send_expect( + out = self.sut_node.send_expect( self.app_simple_mp + " %s --proc-type=auto" % (self.eal_para), "Finished Process Init", 100, @@ -214,9 +214,9 @@ class TestMultiprocess(TestCase): "The type of process (SECONDARY) was not detected properly", ) self.session_secondary.send_expect("send hello_secondary", ">", 100) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.session_secondary.send_expect("quit", "# ") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify( "Received 'hello_secondary'" in out, @@ -228,7 +228,7 @@ class TestMultiprocess(TestCase): Multiple processes without "--proc-type" flag. """ - cores = self.dut.get_core_list("1S/2C/1T", socket=self.socket) + cores = self.sut_node.get_core_list("1S/2C/1T", socket=self.socket) coremask = utils.create_mask(cores) self.session_secondary.send_expect( self.app_simple_mp + " %s -m 64" % (self.eal_para), @@ -236,7 +236,7 @@ class TestMultiprocess(TestCase): 100, ) coremask = hex(int(coremask, 16) * 0x10).rstrip("L") - out = self.dut.send_expect( + out = self.sut_node.send_expect( self.app_simple_mp + " %s" % (self.eal_para), "# ", 100 ) @@ -252,11 +252,11 @@ class TestMultiprocess(TestCase): Benchmark Multiprocess performance. 
#""" packet_count = 16 - self.dut.send_expect("fg", "# ") - txPort = self.tester.get_local_port(self.dut_ports[0]) - rxPort = self.tester.get_local_port(self.dut_ports[1]) - mac = self.tester.get_mac(txPort) - dmac = self.dut.get_mac_address(self.dut_ports[0]) + self.sut_node.send_expect("fg", "# ") + txPort = self.tg_node.get_local_port(self.sut_ports[0]) + rxPort = self.tg_node.get_local_port(self.sut_ports[1]) + mac = self.tg_node.get_mac(txPort) + dmac = self.sut_node.get_mac_address(self.sut_ports[0]) tgenInput = [] # create mutative src_ip+dst_ip package @@ -265,31 +265,31 @@ class TestMultiprocess(TestCase): r'flows = [Ether(src="%s", dst="%s")/IP(src="192.168.1.%d", dst="192.168.1.%d")/("X"*26)]' % (mac, dmac, i + 1, i + 2) ) - self.tester.scapy_append(package) + self.tg_node.scapy_append(package) pcap = os.sep.join([self.output_path, "test_%d.pcap" % i]) - self.tester.scapy_append('wrpcap("%s", flows)' % pcap) + self.tg_node.scapy_append('wrpcap("%s", flows)' % pcap) tgenInput.append([txPort, rxPort, pcap]) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run multiple symmetric_mp process validExecutions = [] for execution in executions: - if len(self.dut.get_core_list(execution["cores"])) == execution["nprocs"]: + if len(self.sut_node.get_core_list(execution["cores"])) == execution["nprocs"]: validExecutions.append(execution) - portMask = utils.create_mask(self.dut_ports) + portMask = utils.create_mask(self.sut_ports) for n in range(len(validExecutions)): execution = validExecutions[n] # get coreList form execution['cores'] - coreList = self.dut.get_core_list(execution["cores"], socket=self.socket) + coreList = self.sut_node.get_core_list(execution["cores"], socket=self.socket) # to run a set of symmetric_mp instances, like test plan - dutSessionList = [] + sutSessionList = [] for index in range(len(coreList)): - dut_new_session = self.dut.new_session() - dutSessionList.append(dut_new_session) - # add -a option when tester and dut in same 
server - dut_new_session.send_expect( + sut_new_session = self.sut_node.new_session() + sutSessionList.append(sut_new_session) + # add -a option when TG and SUT in same server + sut_new_session.send_expect( self.app_symmetric_mp + " -c %s --proc-type=auto %s -- -p %s --num-procs=%d --proc-id=%d" % ( @@ -303,20 +303,20 @@ class TestMultiprocess(TestCase): ) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) execution["pps"] = pps # close all symmetric_mp process - self.dut.send_expect("killall symmetric_mp", "# ") - # close all dut sessions - for dut_session in dutSessionList: - self.dut.close_session(dut_session) + self.sut_node.send_expect("killall symmetric_mp", "# ") + # close all SUT sessions + for sut_session in sutSessionList: + self.sut_node.close_session(sut_session) # get rate and mpps data for n in range(len(executions)): @@ -350,42 +350,42 @@ class TestMultiprocess(TestCase): """ Benchmark Multiprocess client-server performance. 
""" - self.dut.kill_all() - self.dut.send_expect("fg", "# ") - txPort = self.tester.get_local_port(self.dut_ports[0]) - rxPort = self.tester.get_local_port(self.dut_ports[1]) - mac = self.tester.get_mac(txPort) - - self.tester.scapy_append( - 'dmac="%s"' % self.dut.get_mac_address(self.dut_ports[0]) + self.sut_node.kill_all() + self.sut_node.send_expect("fg", "# ") + txPort = self.tg_node.get_local_port(self.sut_ports[0]) + rxPort = self.tg_node.get_local_port(self.sut_ports[1]) + mac = self.tg_node.get_mac(txPort) + + self.tg_node.scapy_append( + 'dmac="%s"' % self.sut_node.get_mac_address(self.sut_ports[0]) ) - self.tester.scapy_append('smac="%s"' % mac) - self.tester.scapy_append( + self.tg_node.scapy_append('smac="%s"' % mac) + self.tg_node.scapy_append( 'flows = [Ether(src=smac, dst=dmac)/IP(src="192.168.1.1", dst="192.168.1.1")/("X"*26)]' ) pcap = os.sep.join([self.output_path, "test.pcap"]) - self.tester.scapy_append('wrpcap("%s", flows)' % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", flows)' % pcap) + self.tg_node.scapy_execute() validExecutions = [] for execution in executions: - if len(self.dut.get_core_list(execution["cores"])) == execution["nprocs"]: + if len(self.sut_node.get_core_list(execution["cores"])) == execution["nprocs"]: validExecutions.append(execution) for execution in validExecutions: - coreList = self.dut.get_core_list(execution["cores"], socket=self.socket) - # get core with socket parameter to specified which core dut used when tester and dut in same server + coreList = self.sut_node.get_core_list(execution["cores"], socket=self.socket) + # get core with socket parameter to specified which core SUT used when TG and SUT in same server coreMask = utils.create_mask( - self.dut.get_core_list("1S/1C/1T", socket=self.socket) + self.sut_node.get_core_list("1S/1C/1T", socket=self.socket) ) - portMask = utils.create_mask(self.dut_ports) - # specified mp_server core and add -a option when tester and dut in same 
server - self.dut.send_expect( + portMask = utils.create_mask(self.sut_ports) + # specified mp_server core and add -a option when TG and SUT in same server + self.sut_node.send_expect( self.app_mp_server + " -n %d -c %s %s -- -p %s -n %d" % ( - self.dut.get_memory_channels(), + self.sut_node.get_memory_channels(), coreMask, self.eal_param, portMask, @@ -394,35 +394,35 @@ class TestMultiprocess(TestCase): "Finished Process Init", 20, ) - self.dut.send_expect("^Z", "\r\n") - self.dut.send_expect("bg", "# ") + self.sut_node.send_expect("^Z", "\r\n") + self.sut_node.send_expect("bg", "# ") for n in range(execution["nprocs"]): time.sleep(5) # use next core as mp_client core, different from mp_server coreMask = utils.create_mask([str(int(coreList[n]) + 1)]) - self.dut.send_expect( + self.sut_node.send_expect( self.app_mp_client + " -n %d -c %s --proc-type=secondary %s -- -n %d" - % (self.dut.get_memory_channels(), coreMask, self.eal_param, n), + % (self.sut_node.get_memory_channels(), coreMask, self.eal_param, n), "Finished Process Init", ) - self.dut.send_expect("^Z", "\r\n") - self.dut.send_expect("bg", "# ") + self.sut_node.send_expect("^Z", "\r\n") + self.sut_node.send_expect("bg", "# ") tgenInput = [] tgenInput.append([txPort, rxPort, pcap]) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) execution["pps"] = pps - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(5) for n in range(len(executions)): @@ -472,12 +472,12 @@ class TestMultiprocess(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() pass def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() pass diff --git a/tests/TestSuite_nic_single_core_perf.py b/tests/TestSuite_nic_single_core_perf.py index a916711f..99ee7f56 100644 --- a/tests/TestSuite_nic_single_core_perf.py +++ b/tests/TestSuite_nic_single_core_perf.py @@ -16,10 +16,10 @@ from numpy import mean import framework.rst as rst import framework.utils as utils from framework.exception import VerifyFailure -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import HEADER_SIZE, UPDATE_EXPECTED, load_global_setting from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestNicSingleCorePerf(TestCase): @@ -62,12 +62,12 @@ class TestNicSingleCorePerf(TestCase): "ICE_25G-E810_XXV_SFP", ]: extra_options = "-Dc_args=-DRTE_LIBRTE_ICE_16BYTE_RX_DESC" - self.dut.build_install_dpdk(self.target, extra_options=extra_options) + self.sut_node.build_install_dpdk(self.target, extra_options=extra_options) # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports() - self.socket = self.dut.get_numa_id(self.dut_ports[0]) - self.pmdout = PmdOutput(self.dut) + self.sut_ports = self.sut_node.get_ports() + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.pmdout = PmdOutput(self.sut_node) # determine if to save test result as a separated file self.save_result_flag = True @@ -79,7 +79,7 @@ class TestNicSingleCorePerf(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -99,7 +99,7 @@ class TestNicSingleCorePerf(TestCase): # load the expected 
throughput for required nic if self.nic in ["ConnectX4_LX_MT4117"]: - nic_speed = self.dut.ports_info[0]["port"].get_nic_speed() + nic_speed = self.sut_node.ports_info[0]["port"].get_nic_speed() if nic_speed == "25000": self.expected_throughput = self.get_suite_cfg()["expected_throughput"][ self.nic @@ -154,19 +154,19 @@ class TestNicSingleCorePerf(TestCase): """ payload_size = frame_size - HEADER_SIZE["ip"] - HEADER_SIZE["eth"] pcaps = {} - for _port in self.dut_ports: + for _port in self.sut_ports: if 1 == port_num: flow = [ 'Ether(src="52:00:00:00:00:00")/%s/("X"*%d)' % (self.flows()[_port], payload_size) ] pcap = os.sep.join([self.output_path, "dst{0}.pcap".format(_port)]) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap, ",".join(flow))) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap, ",".join(flow))) + self.tg_node.scapy_execute() pcaps[_port] = [] pcaps[_port].append(pcap) else: - index = self.dut_ports[_port] + index = self.sut_ports[_port] cnt = 0 for layer in self.flows()[_port * 2 : (_port + 1) * 2]: flow = [ @@ -176,10 +176,10 @@ class TestNicSingleCorePerf(TestCase): pcap = os.sep.join( [self.output_path, "dst{0}_{1}.pcap".format(index, cnt)] ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [%s])' % (pcap, ",".join(flow)) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() if index not in pcaps: pcaps[index] = [] pcaps[index].append(pcap) @@ -192,19 +192,19 @@ class TestNicSingleCorePerf(TestCase): """ tgen_input = [] if 1 == port_num: - txIntf = self.tester.get_local_port(self.dut_ports[0]) + txIntf = self.tg_node.get_local_port(self.sut_ports[0]) rxIntf = txIntf for pcap in pcaps[0]: tgen_input.append((txIntf, rxIntf, pcap)) else: for rxPort in range(port_num): if rxPort % port_num == 0 or rxPort**2 == port_num: - txIntf = self.tester.get_local_port(self.dut_ports[rxPort + 1]) - port_id = self.dut_ports[rxPort + 1] + txIntf = 
self.tg_node.get_local_port(self.sut_ports[rxPort + 1]) + port_id = self.sut_ports[rxPort + 1] else: - txIntf = self.tester.get_local_port(self.dut_ports[rxPort - 1]) - port_id = self.dut_ports[rxPort - 1] - rxIntf = self.tester.get_local_port(self.dut_ports[rxPort]) + txIntf = self.tg_node.get_local_port(self.sut_ports[rxPort - 1]) + port_id = self.sut_ports[rxPort - 1] + rxIntf = self.tg_node.get_local_port(self.sut_ports[rxPort]) for pcap in pcaps[port_id]: tgen_input.append((txIntf, rxIntf, pcap)) return tgen_input @@ -213,7 +213,7 @@ class TestNicSingleCorePerf(TestCase): """ Run nic single core performance """ - self.nb_ports = len(self.dut_ports) + self.nb_ports = len(self.sut_ports) self.verify(self.nb_ports >= 1, "At least 1 port is required to test") self.perf_test(self.nb_ports) self.handle_expected() @@ -238,9 +238,9 @@ class TestNicSingleCorePerf(TestCase): # ports allowlist eal_para = "" for i in range(port_num): - eal_para += " -a " + self.dut.ports_info[i]["pci"] + eal_para += " -a " + self.sut_node.ports_info[i]["pci"] - port_mask = utils.create_mask(self.dut_ports) + port_mask = utils.create_mask(self.sut_ports) for fwd_config in list(self.test_parameters.keys()): # parameters for application/testpmd @@ -251,7 +251,7 @@ class TestNicSingleCorePerf(TestCase): thread_num = int( fwd_config[fwd_config.find("/") + 1 : fwd_config.find("T")] ) - core_list = self.dut.get_core_list(core_config, socket=self.socket) + core_list = self.sut_node.get_core_list(core_config, socket=self.socket) self.verify( len(core_list) >= thread_num, "the Hyper-threading not open, please open it to test", @@ -300,15 +300,15 @@ class TestNicSingleCorePerf(TestCase): self.pmdout.start_testpmd( core_list, parameter, eal_para, socket=self.socket ) - self.dut.send_expect("start", "testpmd> ", 15) + self.sut_node.send_expect("start", "testpmd> ", 15) vm_config = self.set_fields() # clear streams before add new streams - self.tester.pktgen.clear_streams() + 
self.tg_node.perf_tg.clear_streams() - # run packet generator + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, vm_config, self.tester.pktgen + tgenInput, 100, vm_config, self.tg_node.perf_tg ) # set traffic option traffic_opt = { @@ -317,7 +317,7 @@ class TestNicSingleCorePerf(TestCase): "duration": self.test_duration, "interval": self.throughput_stat_sample_interval, } - stats = self.tester.pktgen.measure( + stats = self.tg_node.perf_tg.measure( stream_ids=streams, traffic_opt=traffic_opt ) @@ -347,8 +347,8 @@ class TestNicSingleCorePerf(TestCase): total_mpps_rx = total_pps_rx / 1000000.0 self.throughput[fwd_config][frame_size][nb_desc] = total_mpps_rx - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) self.logger.info( "Trouthput of " @@ -489,5 +489,5 @@ class TestNicSingleCorePerf(TestCase): """ # resume setting if self.rx_desc_size == 16: - self.dut.build_install_dpdk(self.target) - self.dut.kill_all() + self.sut_node.build_install_dpdk(self.target) + self.sut_node.kill_all() diff --git a/tests/TestSuite_ntb.py b/tests/TestSuite_ntb.py index 3c3d765a..41b9133c 100644 --- a/tests/TestSuite_ntb.py +++ b/tests/TestSuite_ntb.py @@ -6,20 +6,19 @@ import os import re import time -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from nics.net_device import GetNicObj class TestNtb(TestCase): def set_up_all(self): - self.verify(len(self.duts) >= 2, "Insufficient duts for NTB!!!") - self.ntb_host = self.duts[0] - self.ntb_client = self.duts[1] + self.verify(len(self.sut_nodes) >= 2, "Insufficient SUTs for NTB!!!") + self.ntb_host = self.sut_nodes[0] + self.ntb_client = self.sut_nodes[1] - # each dut required one ports 
+ # each SUT required one ports self.verify( len(self.ntb_host.get_ports()) >= 1 and len(self.ntb_client.get_ports()) >= 1, @@ -33,12 +32,12 @@ class TestNtb(TestCase): self.frame_sizes = [64, 128, 256, 512, 1024, 1518] self.header_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["udp"] - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") out = self.ntb_host.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: self.ntb_host.send_expect("mkdir -p %s" % self.out_path, "# ") @@ -53,12 +52,12 @@ class TestNtb(TestCase): self.table_header = ["FrameSize(B)", "Throughput(Mpps)", "% linerate"] self.result_table_create(self.table_header) - def prepare_dpdk_app(self, crb): - out = crb.send_expect("ls ./" + crb.target + "/kmod/igb_uio.ko", "#", 10) + def prepare_dpdk_app(self, node): + out = node.send_expect("ls ./" + node.target + "/kmod/igb_uio.ko", "#", 10) if "No such file or directory" in out: - crb.build_install_dpdk(crb.target) + node.build_install_dpdk(node.target) - out = crb.build_dpdk_apps("./examples/ntb") + out = node.build_dpdk_apps("./examples/ntb") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") @@ -70,11 +69,11 @@ class TestNtb(TestCase): self.verify( len(self.host_core_list) >= core_number and len(self.client_core_list) >= core_number, - "There have not enough cores to start testpmd on duts", + "There have not enough cores to start testpmd on SUTs", ) - def get_ntb_port(self, crb): - device = crb.send_expect( + def get_ntb_port(self, node): + device = node.send_expect( "lspci -D | grep 
Non-Transparent |awk '{{print $1}}'", "# ", 10 ) self.verify(device, "Falied to find ntb device") @@ -82,27 +81,27 @@ class TestNtb(TestCase): domain_id = addr_array[0] bus_id = addr_array[1] devfun_id = addr_array[2] - port = GetNicObj(crb, domain_id, bus_id, devfun_id) + port = GetNicObj(node, domain_id, bus_id, devfun_id) return port def set_driver(self, driver=""): self.ntb_host.restore_interfaces() self.ntb_client.restore_interfaces() - for crb in [self.ntb_host, self.ntb_client]: - crb.setup_modules(crb.target, driver, None) + for node in [self.ntb_host, self.ntb_client]: + node.setup_modules(node.target, driver, None) if driver == "igb_uio": - crb.send_expect("rmmod -f igb_uio", "#", 30) - crb.send_expect( - "insmod ./" + crb.target + "/kmod/igb_uio.ko wc_activate=1", "#", 30 + node.send_expect("rmmod -f igb_uio", "#", 30) + node.send_expect( + "insmod ./" + node.target + "/kmod/igb_uio.ko wc_activate=1", "#", 30 ) if driver == "vfio-pci": - crb.send_expect( + node.send_expect( "echo 'base=0x39bfa0000000 size=0x400000 type=write-combining' >> /proc/mtrr", "#", 10, ) - crb.send_expect( + node.send_expect( "echo 'base=0x39bfa0000000 size=0x4000000 type=write-combining' >> /proc/mtrr", "#", 10, @@ -126,7 +125,7 @@ class TestNtb(TestCase): cmd_opt = " ".join(["{}={}".format(key, param[key]) for key in param.keys()]) self.get_core_list() - app = self.dut.apps_name["ntb"] + app = self.sut_node.apps_name["ntb"] eal_host = self.ntb_host.create_eal_parameters(cores=self.host_core_list) eal_client = self.ntb_client.create_eal_parameters(cores=self.client_core_list) host_cmd_line = " ".join([app, eal_host, cmd_opt]) @@ -137,9 +136,9 @@ class TestNtb(TestCase): # self.ntb_host.send_expect(" ", 'ntb> ', 10) # self.ntb_client.send_expect(" ", 'ntb> ', 10) - def start_ntb_fwd_on_dut(self, crb, fwd_mode="io"): - crb.send_expect("set fwd %s" % fwd_mode, "ntb> ", 30) - crb.send_expect("start", "ntb> ", 30) + def start_ntb_fwd_on_sut(self, node, fwd_mode="io"): + 
node.send_expect("set fwd %s" % fwd_mode, "ntb> ", 30) + node.send_expect("start", "ntb> ", 30) def config_stream(self, frame_size): payload = frame_size - self.header_size @@ -151,9 +150,9 @@ class TestNtb(TestCase): % (each_mac, i, "X" * payload) ) pcap = os.path.join(self.out_path, "ntb_%d_%d.pcap" % (i, frame_size)) - self.tester.scapy_append("flow=" + flow) - self.tester.scapy_append("wrpcap('%s', flow)" % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append("flow=" + flow) + self.tg_node.scapy_append("wrpcap('%s', flow)" % pcap) + self.tg_node.scapy_execute() tgen_input.append((i, (i + 1) % 2, pcap)) return tgen_input @@ -166,18 +165,18 @@ class TestNtb(TestCase): traffic_opt = {"delay": 5} # clear streams before add new streams - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() - # run packet generator + # run traffic generator fields_config = { "ip": { "dst": {"action": "random"}, }, } streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, fields_config, self.tester.pktgen + tgen_input, 100, fields_config, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) @@ -186,8 +185,8 @@ class TestNtb(TestCase): throughput = Mpps * 100 / float(self.wirespeed(self.nic, frame_size, 1)) return Mpps, throughput - def get_packets_of_each_port(self, crb): - out = crb.send_expect("show port stats", "ntb> ", 10) + def get_packets_of_each_port(self, node): + out = node.send_expect("show port stats", "ntb> ", 10) info = re.findall("statistics for NTB port", out) index = out.find(info[0]) tx = re.search("Tx-packets:\s*(\d*)", out[index:]) @@ -259,8 +258,8 @@ class TestNtb(TestCase): self.ntb_bind_driver(driver) self.launch_ntb_fwd(**{"buf-size": 65407}) - self.start_ntb_fwd_on_dut(self.ntb_host, fwd_mode="file-trans") - self.start_ntb_fwd_on_dut(self.ntb_client, fwd_mode="file-trans") + 
self.start_ntb_fwd_on_sut(self.ntb_host, fwd_mode="file-trans") + self.start_ntb_fwd_on_sut(self.ntb_client, fwd_mode="file-trans") self.send_file_and_verify() def test_file_tran_mode_and_vfio_pci(self): @@ -269,8 +268,8 @@ class TestNtb(TestCase): self.ntb_bind_driver(driver) self.launch_ntb_fwd(**{"buf-size": 65407}) - self.start_ntb_fwd_on_dut(self.ntb_host, fwd_mode="file-trans") - self.start_ntb_fwd_on_dut(self.ntb_client, fwd_mode="file-trans") + self.start_ntb_fwd_on_sut(self.ntb_host, fwd_mode="file-trans") + self.start_ntb_fwd_on_sut(self.ntb_client, fwd_mode="file-trans") self.send_file_and_verify() def test_pkt_rxtx_mode_and_igb_uio(self): @@ -279,8 +278,8 @@ class TestNtb(TestCase): self.ntb_bind_driver(driver) self.launch_ntb_fwd(**{"buf-size": 65407}) - self.start_ntb_fwd_on_dut(self.ntb_host, fwd_mode="rxonly") - self.start_ntb_fwd_on_dut(self.ntb_client, fwd_mode="txonly") + self.start_ntb_fwd_on_sut(self.ntb_host, fwd_mode="rxonly") + self.start_ntb_fwd_on_sut(self.ntb_client, fwd_mode="txonly") time.sleep(1) self.check_packets_for_rxtx() @@ -290,8 +289,8 @@ class TestNtb(TestCase): self.ntb_bind_driver(driver) self.launch_ntb_fwd(**{"buf-size": 65407}) - self.start_ntb_fwd_on_dut(self.ntb_host, fwd_mode="rxonly") - self.start_ntb_fwd_on_dut(self.ntb_client, fwd_mode="txonly") + self.start_ntb_fwd_on_sut(self.ntb_host, fwd_mode="rxonly") + self.start_ntb_fwd_on_sut(self.ntb_client, fwd_mode="txonly") time.sleep(1) self.check_packets_for_rxtx() @@ -303,8 +302,8 @@ class TestNtb(TestCase): self.create_table() self.launch_ntb_fwd(**{"burst": 32}) - self.start_ntb_fwd_on_dut(self.ntb_host, fwd_mode="iofwd") - self.start_ntb_fwd_on_dut(self.ntb_client, fwd_mode="iofwd") + self.start_ntb_fwd_on_sut(self.ntb_host, fwd_mode="iofwd") + self.start_ntb_fwd_on_sut(self.ntb_client, fwd_mode="iofwd") self.send_pkg_and_verify() self.result_table_print() @@ -317,8 +316,8 @@ class TestNtb(TestCase): self.create_table() self.launch_ntb_fwd(**{"burst": 32}) - 
self.start_ntb_fwd_on_dut(self.ntb_host, fwd_mode="iofwd") - self.start_ntb_fwd_on_dut(self.ntb_client, fwd_mode="iofwd") + self.start_ntb_fwd_on_sut(self.ntb_host, fwd_mode="iofwd") + self.start_ntb_fwd_on_sut(self.ntb_client, fwd_mode="iofwd") self.send_pkg_and_verify() self.result_table_print() diff --git a/tests/TestSuite_nvgre.py b/tests/TestSuite_nvgre.py index 99681ea7..c18e9489 100644 --- a/tests/TestSuite_nvgre.py +++ b/tests/TestSuite_nvgre.py @@ -26,8 +26,8 @@ from scapy.sendrecv import sniff from scapy.utils import rdpcap, socket, struct, wrpcap import framework.utils as utils -from framework.packet import IncreaseIP, IncreaseIPv6 from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import IncreaseIP, IncreaseIPv6 from framework.settings import HEADER_SIZE from framework.test_case import TestCase @@ -336,14 +336,14 @@ class NvgreTestConfig(object): def send_pcap(self): """ - Send nvgre pcap file by tester_tx_iface + Send nvgre pcap file by tg_tx_iface """ - self.test_case.tester.scapy_append('pcap = rdpcap("%s")' % self.pcap_file) + self.test_case.tg_node.scapy_append('pcap = rdpcap("%s")' % self.pcap_file) time.sleep(1) - self.test_case.tester.scapy_append( - 'sendp(pcap, iface="%s")' % self.test_case.tester_tx_iface + self.test_case.tg_node.scapy_append( + 'sendp(pcap, iface="%s")' % self.test_case.tg_tx_iface ) - self.test_case.tester.scapy_execute() + self.test_case.tg_node.scapy_execute() time.sleep(1) def pcap_len(self): @@ -391,8 +391,8 @@ class TestNvgre(TestCase): else: self.verify(False, "%s not support NVGRE case" % self.nic) # Based on h/w type, choose how many ports to use - ports = self.dut.get_ports(self.nic) - self.portmask = utils.create_mask(self.dut.get_ports(self.nic)) + ports = self.sut_node.get_ports(self.nic) + self.portmask = utils.create_mask(self.sut_node.get_ports(self.nic)) # Verify that enough ports are available self.verify(len(ports) >= 2, "Insufficient ports for testing") @@ -400,18 +400,18 @@ 
class TestNvgre(TestCase): # Verify that enough threads are available # start testpmd - self.pmdout = PmdOutput(self.dut) - self.path = self.dut.apps_name["test-pmd"] + self.pmdout = PmdOutput(self.sut_node) + self.path = self.sut_node.apps_name["test-pmd"] # init port - self.dut_rx_port = ports[0] - self.dut_tx_port = ports[1] - self.dut_rx_port_mac = self.dut.get_mac_address(self.dut_rx_port) - self.dut_tx_port_mac = self.dut.get_mac_address(self.dut_tx_port) + self.sut_rx_port = ports[0] + self.sut_tx_port = ports[1] + self.sut_rx_port_mac = self.sut_node.get_mac_address(self.sut_rx_port) + self.sut_tx_port_mac = self.sut_node.get_mac_address(self.sut_tx_port) - self.tester_tx_port = self.tester.get_local_port(self.dut_rx_port) - self.tester_tx_iface = self.tester.get_interface(self.tester_tx_port) - self.tester_rx_port = self.tester.get_local_port(self.dut_tx_port) - self.tester_rx_iface = self.tester.get_interface(self.tester_rx_port) + self.tg_tx_port = self.tg_node.get_local_port(self.sut_rx_port) + self.tg_tx_iface = self.tg_node.get_interface(self.tg_tx_port) + self.tg_rx_port = self.tg_node.get_local_port(self.sut_tx_port) + self.tg_rx_iface = self.tg_node.get_interface(self.tg_rx_port) # invalid parameter self.invalid_mac = "00:00:00:00:01" @@ -533,21 +533,21 @@ class TestNvgre(TestCase): }, ] - self.ports_socket = self.dut.get_numa_id(self.dut_rx_port) + self.ports_socket = self.sut_node.get_numa_id(self.sut_rx_port) def nvgre_detect(self, **kwargs): """ send nvgre packet and check whether testpmd detect the correct packet type """ - self.eal_para = self.dut.create_eal_parameters(cores="1S/5C/1T") - out = self.dut.send_expect( + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/5C/1T") + out = self.sut_node.send_expect( r"%s %s -- -i --disable-rss --rxq=4 --txq=4 --nb-cores=4 --portmask=%s" % (self.path, self.eal_para, self.portmask), "testpmd>", 30, ) - out = self.dut.send_expect("set fwd rxonly", "testpmd>", 10) - self.dut.send_expect("set 
verbose 1", "testpmd>", 10) + out = self.sut_node.send_expect("set fwd rxonly", "testpmd>", 10) + self.sut_node.send_expect("set verbose 1", "testpmd>", 10) arg_str = "" for arg in kwargs: @@ -557,37 +557,37 @@ class TestNvgre(TestCase): self.logger.info("send nvgre pkts %s" % arg_str) config = NvgreTestConfig(self, **kwargs) # now cloud filter will default enable L2 mac filter, so dst mac must be same - config.outer_mac_dst = self.dut_rx_port_mac + config.outer_mac_dst = self.sut_rx_port_mac config.create_pcap() - self.dut.send_expect("start", "testpmd>", 10) - self.pmdout.wait_link_status_up(self.dut_rx_port) + self.sut_node.send_expect("start", "testpmd>", 10) + self.pmdout.wait_link_status_up(self.sut_rx_port) config.send_pcap() # check whether detect nvgre type - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() print(out) self.verify(config.packet_type(self.nic) in out, "Nvgre Packet not detected") - self.dut.send_expect("show port stats all", "testpmd>", 10) - self.dut.send_expect("stop", "testpmd>", 10) - self.dut.send_expect("quit", "#", 10) + self.sut_node.send_expect("show port stats all", "testpmd>", 10) + self.sut_node.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("quit", "#", 10) def nvgre_filter(self, rule, config, queue_id, remove=False): """ send nvgre packet and check whether receive packet in assigned queue """ # send rule - out = self.dut.send_expect(rule, "testpmd>", 3) + out = self.sut_node.send_expect(rule, "testpmd>", 3) self.verify("Flow rule #0 created" in out, "Flow rule create failed") if remove: - self.dut.send_expect("flow flush 0", "testpmd>", 3) + self.sut_node.send_expect("flow flush 0", "testpmd>", 3) queue_id = 0 # send nvgre packet config.create_pcap() - self.dut.send_expect("start", "testpmd>", 10) - self.pmdout.wait_link_status_up(self.dut_rx_port) + self.sut_node.send_expect("start", "testpmd>", 10) + self.pmdout.wait_link_status_up(self.sut_rx_port) config.send_pcap() - out = 
self.dut.get_session_output() + out = self.sut_node.get_session_output() print(out) queue = -1 pattern = re.compile("- Receive queue=0x(\d)") @@ -602,9 +602,9 @@ class TestNvgre(TestCase): ) # del rule - self.dut.send_expect("flow flush 0", "testpmd>", 10) + self.sut_node.send_expect("flow flush 0", "testpmd>", 10) - self.dut.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("stop", "testpmd>", 10) def nvgre_checksum(self, **kwargs): @@ -616,7 +616,7 @@ class TestNvgre(TestCase): config = NvgreTestConfig(self, **args) # now cloud filter will default enable L2 mac filter, so dst mac must be same - config.outer_mac_dst = self.dut_rx_port_mac + config.outer_mac_dst = self.sut_rx_port_mac # csum function will not change outer ipv src address already if config.outer_ip6_src != "N/A": config.outer_ip6_src = config.outer_ip6_src @@ -636,31 +636,31 @@ class TestNvgre(TestCase): # start testpmd with 2queue/1port - self.eal_para = self.dut.create_eal_parameters(cores="1S/5C/1T") - out = self.dut.send_expect( + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/5C/1T") + out = self.sut_node.send_expect( r"%s %s -- -i --disable-rss --rxq=4 --txq=4 --nb-cores=4 --portmask=%s --enable-rx-cksum" % (self.path, self.eal_para, self.portmask), "testpmd>", 30, ) # disable vlan filter - self.dut.send_expect("vlan set filter off %d" % self.dut_rx_port, "testpmd") + self.sut_node.send_expect("vlan set filter off %d" % self.sut_rx_port, "testpmd") # enable tx checksum offload - self.dut.send_expect("set verbose 1", "testpmd>", 10) - self.dut.send_expect("set fwd csum", "testpmd>", 10) - self.dut.send_expect("port stop all", "testpmd>") - self.dut.send_expect("csum set ip hw %d" % (self.dut_tx_port), "testpmd>", 10) - self.dut.send_expect("csum set udp hw %d" % (self.dut_tx_port), "testpmd>", 10) - self.dut.send_expect("csum set tcp hw %d" % (self.dut_tx_port), "testpmd>", 10) - self.dut.send_expect("csum set sctp hw %d" % (self.dut_tx_port), "testpmd>", 10) - 
self.dut.send_expect( - "csum set outer-ip hw %d" % (self.dut_tx_port), "testpmd>", 10 + self.sut_node.send_expect("set verbose 1", "testpmd>", 10) + self.sut_node.send_expect("set fwd csum", "testpmd>", 10) + self.sut_node.send_expect("port stop all", "testpmd>") + self.sut_node.send_expect("csum set ip hw %d" % (self.sut_tx_port), "testpmd>", 10) + self.sut_node.send_expect("csum set udp hw %d" % (self.sut_tx_port), "testpmd>", 10) + self.sut_node.send_expect("csum set tcp hw %d" % (self.sut_tx_port), "testpmd>", 10) + self.sut_node.send_expect("csum set sctp hw %d" % (self.sut_tx_port), "testpmd>", 10) + self.sut_node.send_expect( + "csum set outer-ip hw %d" % (self.sut_tx_port), "testpmd>", 10 ) - self.dut.send_expect( - "csum parse-tunnel on %d" % (self.dut_tx_port), "testpmd>", 10 + self.sut_node.send_expect( + "csum parse-tunnel on %d" % (self.sut_tx_port), "testpmd>", 10 ) - self.dut.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd>") # log the nvgre format arg_str = "" @@ -668,23 +668,23 @@ class TestNvgre(TestCase): arg_str += "[%s = %s]" % (arg, kwargs[arg]) self.logger.info("nvgre packet %s" % arg_str) - out = self.dut.send_expect("start", "testpmd>", 10) - self.pmdout.wait_link_status_up(self.dut_rx_port) + out = self.sut_node.send_expect("start", "testpmd>", 10) + self.pmdout.wait_link_status_up(self.sut_rx_port) # create pcap file with supplied arguments config = NvgreTestConfig(self, **kwargs) - config.outer_mac_dst = self.dut_rx_port_mac + config.outer_mac_dst = self.sut_rx_port_mac config.create_pcap() # remove temporary files - self.tester.send_expect("rm -rf %s" % config.capture_file, "# ") + self.tg_node.send_expect("rm -rf %s" % config.capture_file, "# ") # save the capture packet into pcap format - self.tester.scapy_background() - self.tester.scapy_append( + self.tg_node.scapy_background() + self.tg_node.scapy_append( 'p=sniff(iface="%s",filter="ether[12:2]!=0x88cc",count=1,timeout=5)' - % 
self.tester_rx_iface + % self.tg_rx_iface ) - self.tester.scapy_append('wrpcap("%s", p)' % config.capture_file) - self.tester.scapy_foreground() + self.tg_node.scapy_append('wrpcap("%s", p)' % config.capture_file) + self.tg_node.scapy_foreground() config.send_pcap() time.sleep(5) @@ -694,7 +694,7 @@ class TestNvgre(TestCase): os.remove(config.capture_file) self.logger.info("chksums_tx:" + str(chksums)) - out = self.dut.send_expect("stop", "testpmd>", 10) + out = self.sut_node.send_expect("stop", "testpmd>", 10) # verify detected l4 invalid checksum if "inner_l4_invalid" in kwargs and config.inner_l4_type is not "UDP": @@ -710,7 +710,7 @@ class TestNvgre(TestCase): "Failed to count inner ip chksum error", ) - self.dut.send_expect("quit", "#", 10) + self.sut_node.send_expect("quit", "#", 10) # verify saved pcap checksum same to expected checksum for key in chksums_default: @@ -835,29 +835,29 @@ class TestNvgre(TestCase): def test_tunnel_filter(self): # verify tunnel filter feature - self.eal_para = self.dut.create_eal_parameters(cores="1S/5C/1T") - self.dut.send_expect( + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/5C/1T") + self.sut_node.send_expect( r"%s %s -- -i --disable-rss --rxq=%d --txq=%d --nb-cores=4 --portmask=%s" % (self.path, self.eal_para, MAX_TXQ_RXQ, MAX_TXQ_RXQ, self.portmask), "testpmd>", 30, ) - self.dut.send_expect("set fwd rxonly", "testpmd>", 10) - self.dut.send_expect("set verbose 1", "testpmd>", 10) + self.sut_node.send_expect("set fwd rxonly", "testpmd>", 10) + self.sut_node.send_expect("set verbose 1", "testpmd>", 10) config = NvgreTestConfig(self) config_vlan = NvgreTestConfig(self, inner_vlan=1) # now cloud filter will default enable L2 mac filter, so dst mac must be same - config.outer_mac_dst = self.dut_rx_port_mac - config_vlan.outer_mac_dst = self.dut_rx_port_mac + config.outer_mac_dst = self.sut_rx_port_mac + config_vlan.outer_mac_dst = self.sut_rx_port_mac expect_queue = randint(1, MAX_TXQ_RXQ - 1) rule_list = [ # 
check outer mac "flow create {} ingress pattern eth dst is {} / ipv4 / nvgre tni is {} / eth dst is {} " "/ end actions pf / queue index {} / end".format( - self.dut_rx_port, + self.sut_rx_port, config_vlan.outer_mac_dst, config_vlan.tni, config_vlan.inner_mac_dst, @@ -866,7 +866,7 @@ class TestNvgre(TestCase): # check inner mac + inner vlan filter can work "flow create {} ingress pattern eth / ipv4 / nvgre / eth dst is {} / vlan tci is {} / end actions pf " "/ queue index {} / end".format( - self.dut_rx_port, + self.sut_rx_port, config_vlan.inner_mac_dst, config_vlan.inner_vlan, expect_queue, @@ -874,7 +874,7 @@ class TestNvgre(TestCase): # check inner mac + inner vlan + tunnel id filter can work "flow create {} ingress pattern eth / ipv4 / nvgre tni is {} / eth dst is {} " "/ vlan tci is {} / end actions pf / queue index {} / end".format( - self.dut_rx_port, + self.sut_rx_port, config_vlan.tni, config_vlan.inner_mac_dst, config_vlan.inner_vlan, @@ -883,15 +883,15 @@ class TestNvgre(TestCase): # check inner mac + tunnel id filter can work "flow create {} ingress pattern eth / ipv4 / nvgre tni is {} / eth dst is {} / end actions pf " "/ queue index {} / end".format( - self.dut_rx_port, config.tni, config.inner_mac_dst, expect_queue + self.sut_rx_port, config.tni, config.inner_mac_dst, expect_queue ), # check inner mac filter can work "flow create {} ingress pattern eth / ipv4 / nvgre / eth dst is {} / end actions pf / queue index {} " - "/ end".format(self.dut_rx_port, config.inner_mac_dst, expect_queue), + "/ end".format(self.sut_rx_port, config.inner_mac_dst, expect_queue), # check outer mac + inner mac + tunnel id filter can work "flow create {} ingress pattern eth dst is {} / ipv4 / nvgre tni is {} / eth dst is {} " "/ end actions pf / queue index {} / end".format( - self.dut_rx_port, + self.sut_rx_port, config.outer_mac_dst, config.tni, config.inner_mac_dst, @@ -899,7 +899,7 @@ class TestNvgre(TestCase): ) # iip not supported by now # 'flow create {} ingress 
pattern eth / ipv4 / nvgre / eth / ipv4 dst is {} / end actions pf ' - # '/ queue index {} / end'.format(self.dut_port, + # '/ queue index {} / end'.format(self.sut_port, # config.inner_ip_dst, # queue) ] @@ -910,7 +910,7 @@ class TestNvgre(TestCase): else: self.nvgre_filter(rule, config, expect_queue) - self.dut.send_expect("quit", "# ", 10) + self.sut_node.send_expect("quit", "# ", 10) def test_tunnel_filter_invalid(self): # verify tunnel filter parameter check function @@ -919,22 +919,22 @@ class TestNvgre(TestCase): queue_id = 3 config = NvgreTestConfig(self) - config.outer_mac_dst = self.dut_rx_port_mac + config.outer_mac_dst = self.sut_rx_port_mac - self.eal_para = self.dut.create_eal_parameters(cores="1S/5C/1T") - self.dut.send_expect( + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/5C/1T") + self.sut_node.send_expect( r"%s %s -- -i --disable-rss --rxq=4 --txq=4 --nb-cores=4 --portmask=%s" % (self.path, self.eal_para, self.portmask), "testpmd>", 30, ) - self.dut.send_expect("set fwd rxonly", "testpmd>", 10) - self.dut.send_expect("set verbose 1", "testpmd>", 10) + self.sut_node.send_expect("set fwd rxonly", "testpmd>", 10) + self.sut_node.send_expect("set verbose 1", "testpmd>", 10) rule = ( "flow create {} ingress pattern eth dst is {} / ipv4 / nvgre tni is {} / eth dst is {} " "/ end actions pf / queue index {} / end".format( - self.dut_rx_port, + self.sut_rx_port, config.outer_mac_dst, config.tni, config.inner_mac_dst, @@ -946,42 +946,42 @@ class TestNvgre(TestCase): rule = ( "flow create {} ingress pattern eth / ipv4 / nvgre tni is {} / eth dst is {} / end actions pf " "/ queue index {} / end".format( - self.dut_rx_port, config.tni, self.invalid_mac, queue_id + self.sut_rx_port, config.tni, self.invalid_mac, queue_id ) ) - out = self.dut.send_expect(rule, "testpmd>", 3) + out = self.sut_node.send_expect(rule, "testpmd>", 3) self.verify("Bad arguments" in out, "Failed to detect invalid mac") rule = ( "flow create {} ingress pattern eth / 
ipv4 / nvgre tni is {} / eth / ipv4 dst is {} " "/ end actions pf / queue index {} / end".format( - self.dut_rx_port, config.tni, self.invalid_ip, queue_id + self.sut_rx_port, config.tni, self.invalid_ip, queue_id ) ) - out = self.dut.send_expect(rule, "testpmd>", 3) + out = self.sut_node.send_expect(rule, "testpmd>", 3) self.verify("Bad arguments" in out, "Failed to detect invalid mac") # testpmd is not support # rule = 'flow create {} ingress pattern eth / ipv4 / nvgre tni is {} / eth dst is {} / vlan vid is {} ' \ - # '/ end actions pf / queue index {} / end'.format(self.dut_rx_port, + # '/ end actions pf / queue index {} / end'.format(self.sut_rx_port, # config.tni, # config.inner_mac_dst, # self.invalid_vlan, # queue_id) - # out = self.dut.send_expect(rule, "testpmd>", 3) + # out = self.sut_node.send_expect(rule, "testpmd>", 3) # self.verify("Invalid argument" in out, "Failed to detect invalid vlan") rule = ( "flow create {} ingress pattern eth / ipv4 / nvgre tni is {} / eth dst is {} / end actions pf " "/ queue index {} / end".format( - self.dut_rx_port, config.tni, config.inner_mac_dst, self.invalid_queue + self.sut_rx_port, config.tni, config.inner_mac_dst, self.invalid_queue ) ) - out = self.dut.send_expect(rule, "testpmd>", 3) + out = self.sut_node.send_expect(rule, "testpmd>", 3) self.verify("Invalid queue ID" in out, "Failed to detect invalid queue") - self.dut.send_expect("stop", "testpmd>", 10) - self.dut.send_expect("quit", "#", 10) + self.sut_node.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("quit", "#", 10) def test_nvgre_ipv4_checksum_offload(self): # check normal packet @@ -1031,7 +1031,7 @@ class TestNvgre(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_packet_capture.py b/tests/TestSuite_packet_capture.py index 9244a6ac..2294f9a2 100644 --- a/tests/TestSuite_packet_capture.py +++ b/tests/TestSuite_packet_capture.py @@ -21,11 +21,11 @@ from scapy.packet import Packet as scapyPacket from scapy.utils import rdpcap from framework.exception import VerifyFailure -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase -# These source code copy from packet.py module before sniff_packets/load_sniff_packets +# These source code copy from scapy_packet_builder.py module before sniff_packets/load_sniff_packets # refactor. New refactor methods have much more longer time consumption than # old methods. @@ -95,9 +95,9 @@ def load_sniff_packets(index=""): cap_pkts = rdpcap("/tmp/sniff_%s.pcap" % intf) for pkt in cap_pkts: # packet gen should be scapy - packet = Packet(tx_port=intf) - packet.pktgen.assign_pkt(pkt) - pkts.append(packet) + scapy_pkt_builder = ScapyPacketBuilder(tx_port=intf) + scapy_pkt_builder.scapy_pkt_util.assign_pkt(pkt) + pkts.append(scapy_pkt_builder) except Exception as e: pass @@ -162,8 +162,8 @@ class parsePacket(object): class TestPacketCapture(TestCase): - def is_existed_on_crb(self, check_path, crb="dut"): - alt_session = self.dut.alt_session if crb == "dut" else self.tester.alt_session + def is_existed_on_node(self, check_path, node="sut"): + alt_session = self.sut_node.alt_session if node == "sut" else self.tg_node.alt_session alt_session.send_expect("ls %s" % check_path, "# ") cmd = "echo $?" 
output = alt_session.send_expect(cmd, "# ") @@ -171,37 +171,37 @@ class TestPacketCapture(TestCase): return ret @property - def is_dut_on_tester(self): - # get dut/tester ip to check if they are in a platform - tester_ip = self.tester.get_ip_address() - dut_ip = self.dut.get_ip_address() - return tester_ip == dut_ip + def is_sut_on_tg(self): + # get SUT/TG ip to check if they are in a platform + tg_ip = self.tg_node.get_ip_address() + sut_ip = self.sut_node.get_ip_address() + return tg_ip == sut_ip @property def target_dir(self): # get absolute directory of target source code target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir - def get_dut_iface_with_kernel_driver(self): + def get_sut_iface_with_kernel_driver(self): # only physical nic support PROMISC cmd = "ip link show | grep BROADCAST,MULTICAST | awk {'print $2'}" - out = self.dut.alt_session.send_expect(cmd, "# ") + out = self.sut_node.alt_session.send_expect(cmd, "# ") pat = "(.*):" ifaces = [intf for intf in re.findall(pat, out, re.M) if intf] - for link_port in range(len(self.dut_ports)): - # if they are in a platform, ignore interface used by tester - if not self.is_dut_on_tester: - tester_port = self.tester.get_local_port(link_port) - intf = self.tester.get_interface(tester_port) + for link_port in range(len(self.sut_ports)): + # if they are in a platform, ignore interface used by TG + if not self.is_sut_on_tg: + tg_port = self.tg_node.get_local_port(link_port) + intf = self.tg_node.get_interface(tg_port) if intf in ifaces: ifaces.remove(intf) - # ignore interface used by dut - intf = self.dut.ports_info[link_port]["intf"] + # ignore interface used by SUT + intf = self.sut_node.ports_info[link_port]["intf"] if intf in ifaces: ifaces.remove(intf) @@ -210,16 +210,16 @@ class TestPacketCapture(TestCase): for iface in 
tmp_ifaces: # ignore current interface used by system cmd = "ifconfig %s | grep 'inet ' " % iface - if self.dut.alt_session.send_expect(cmd, "# ") != "": + if self.sut_node.alt_session.send_expect(cmd, "# ") != "": ifaces.remove(iface) - self.dut.alt_session.send_expect("ifconfig {0} up".format(iface), "# ") + self.sut_node.alt_session.send_expect("ifconfig {0} up".format(iface), "# ") time.sleep(10) # get ports on link status tmp_ifaces = ifaces[:] for iface in tmp_ifaces: cmd = "ip link show {0} | grep LOWER_UP".format(iface) - self.dut.alt_session.send_expect(cmd, "# ") - output = self.dut.alt_session.send_expect( + self.sut_node.alt_session.send_expect(cmd, "# ") + output = self.sut_node.alt_session.send_expect( "echo $?".format(iface), "# " ).strip() if output != "0": @@ -235,8 +235,8 @@ class TestPacketCapture(TestCase): supported_drivers = ["i40e", "ixgbe"] result = all( [ - self.dut.ports_info[index]["port"].default_driver in supported_drivers - for index in self.dut_ports + self.sut_node.ports_info[index]["port"].default_driver in supported_drivers + for index in self.sut_ports ] ) msg = "current nic is not supported" @@ -245,7 +245,7 @@ class TestPacketCapture(TestCase): def get_tcpdump_options(self): param = "" direct_param = r"(\s+)\[ -(\w) in\|out\|inout \]" - tcpdump_help = self.dut.alt_session.send_expect("tcpdump -h", "# ") + tcpdump_help = self.sut_node.alt_session.send_expect("tcpdump -h", "# ") for line in tcpdump_help.split("\n"): m = re.match(direct_param, line) if m: @@ -256,7 +256,7 @@ class TestPacketCapture(TestCase): pcap_lib_dir = os.sep.join( [self.target_dir, self.target, "lib/librte_pmd_pcap.a"] ) - return self.is_existed_on_crb(pcap_lib_dir) + return self.is_existed_on_node(pcap_lib_dir) def get_packet_types(self): packet_types = [ @@ -434,20 +434,20 @@ class TestPacketCapture(TestCase): def clear_ASLR(self): cmd = "echo 0 > /proc/sys/kernel/randomize_va_space" - self.dut.alt_session.send_expect(cmd, "# ", timeout=10) + 
self.sut_node.alt_session.send_expect(cmd, "# ", timeout=10) time.sleep(2) def reset_ASLR(self): cmd = "echo 2 > /proc/sys/kernel/randomize_va_space" - self.dut.alt_session.send_expect(cmd, "# ", timeout=10) + self.sut_node.alt_session.send_expect(cmd, "# ", timeout=10) time.sleep(4) def start_testpmd(self): - self.dut.alt_session.send_expect( + self.sut_node.alt_session.send_expect( "rm -fr {0}/*".format(self.pdump_log), "# ", 10 ) - if not self.is_dut_on_tester: - self.tester.alt_session.send_expect( + if not self.is_sut_on_tg: + self.tg_node.alt_session.send_expect( "rm -fr {0}/*".format(self.pdump_log), "# ", 10 ) param_opt = "--port-topology=chained" @@ -470,37 +470,37 @@ class TestPacketCapture(TestCase): and option["tx"][0] is not None and option["rx"][0] == option["tx"][0] ): - if self.is_existed_on_crb(self.rxtx_pcap): - self.dut.alt_session.send_expect("rm -f %s" % self.rxtx_pcap, "# ") + if self.is_existed_on_node(self.rxtx_pcap): + self.sut_node.alt_session.send_expect("rm -f %s" % self.rxtx_pcap, "# ") cmd = self.tcpdump.format(self.rxtx_iface, self.rxtx_pcap) self.session_ex.send_expect(cmd, "# ") else: if option["rx"][0] is not None: - if self.is_existed_on_crb(self.rx_pcap): - self.dut.alt_session.send_expect("rm -f %s" % self.rx_pcap, "# ") + if self.is_existed_on_node(self.rx_pcap): + self.sut_node.alt_session.send_expect("rm -f %s" % self.rx_pcap, "# ") cmd = self.tcpdump.format(self.rx_iface, self.rx_pcap) self.session_ex.send_expect(cmd, "# ") if option["tx"][0] is not None: - if self.is_existed_on_crb(self.tx_pcap): - self.dut.alt_session.send_expect("rm -f %s" % self.tx_pcap, "# ") + if self.is_existed_on_node(self.tx_pcap): + self.sut_node.alt_session.send_expect("rm -f %s" % self.tx_pcap, "# ") cmd = self.tcpdump.format(self.tx_iface, self.tx_pcap) self.session_ex.send_expect(cmd, "# ") time.sleep(4) def stop_tcpdump_iface(self): - self.dut.alt_session.send_expect("killall tcpdump", "# ", 5) + self.sut_node.alt_session.send_expect("killall 
tcpdump", "# ", 5) time.sleep(2) def start_dpdk_pdump(self, option): length_limit = 256 msg = ("pdump option string length should be less than {}").format(length_limit) self.verify(len(option) < length_limit, msg) - self.dut.alt_session.send_expect( + self.sut_node.alt_session.send_expect( "rm -fr {0}/*".format(self.pdump_log), "# ", 20 ) - if not self.is_dut_on_tester: - self.tester.alt_session.send_expect( + if not self.is_sut_on_tg: + self.tg_node.alt_session.send_expect( "rm -fr {0}/*".format(self.pdump_log), "# ", 20 ) cmd = self.dpdk_pdump + " '%s' >/dev/null 2>&1 &" % (option[0]) @@ -511,18 +511,18 @@ class TestPacketCapture(TestCase): rx_dump_pcap = option["rx"][0] if rx_dump_pcap: self.verify( - self.is_existed_on_crb(rx_dump_pcap), + self.is_existed_on_node(rx_dump_pcap), "{1} {0} is not ready".format(rx_dump_pcap, self.tool_name), ) tx_dump_pcap = option["tx"][0] if tx_dump_pcap: self.verify( - self.is_existed_on_crb(tx_dump_pcap), + self.is_existed_on_node(tx_dump_pcap), "{1} {0} is not ready".format(tx_dump_pcap, self.tool_name), ) def stop_dpdk_pdump(self): - self.dut.alt_session.send_expect("killall %s" % self.tool_name, "# ", 5) + self.sut_node.alt_session.send_expect("killall %s" % self.tool_name, "# ", 5) time.sleep(2) def packet_capture_dump_packets(self, pkt_type, number, **kwargs): @@ -531,44 +531,44 @@ class TestPacketCapture(TestCase): self.testpmd.execute_cmd("vlan set filter off 0") self.testpmd.execute_cmd("vlan set filter off 1") self.testpmd.execute_cmd("start") - # tester's port 0 and port 1 work under chained mode. - port_0 = self.dut_ports[self.test_port0_id] - port_1 = self.dut_ports[self.test_port1_id] + # TG's port 0 and port 1 work under chained mode. 
+ port_0 = self.sut_ports[self.test_port0_id] + port_1 = self.sut_ports[self.test_port1_id] # check send tx packet by port 0 - # send packet to dut and compare dpdk-pdump dump pcap with + # send packet to SUT and compare dpdk-pdump dump pcap with # scapy pcap file - intf = self.tester.get_interface(self.tester.get_local_port(port_1)) + intf = self.tg_node.get_interface(self.tg_node.get_local_port(port_1)) # prepare to catch replay packet in out port recPkt = os.path.join("/tmp", "sniff_%s.pcap" % intf) if os.path.exists(recPkt): os.remove(recPkt) if pkt_type == "LLDP": - index = self.tester.tcpdump_sniff_packets( + index = self.tg_node.tcpdump_sniff_packets( intf=intf, count=1, lldp_forbid=False ) else: - index = self.tester.tcpdump_sniff_packets(intf=intf, count=1) - filename = "{}sniff_{}.pcap".format(self.tester.tmp_file, intf) - self.tester.session.copy_file_from(filename, recPkt) + index = self.tg_node.tcpdump_sniff_packets(intf=intf, count=1) + filename = "{}sniff_{}.pcap".format(self.tg_node.tmp_file, intf) + self.tg_node.session.copy_file_from(filename, recPkt) # index = sniff_packets(intf, count=1, timeout=20, pcap=recPkt) - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) if pkt_type == "VLAN_UDP": - pkt.config_layer("dot1q", {"vlan": 20}) - src_mac = self.tester.get_mac(self.tester.get_local_port(port_0)) - pkt.config_layer("ether", {"src": src_mac}) + scapy_pkt_builder.config_layer("dot1q", {"vlan": 20}) + src_mac = self.tg_node.get_mac(self.tg_node.get_local_port(port_0)) + scapy_pkt_builder.config_layer("ether", {"src": src_mac}) # save send packet in a pcap file refPkt = self.send_pcap % (pkt_type, "rx", number) if os.path.exists(refPkt): os.remove(refPkt) - pkt.save_pcapfile(filename=refPkt) + scapy_pkt_builder.save_pcapfile(filename=refPkt) # send out test packet - tester_port = self.tester.get_local_port(port_0) - intf = self.tester.get_interface(tester_port) - pkt.send_pkt(self.tester, tx_port=intf) + 
tg_port = self.tg_node.get_local_port(port_0) + intf = self.tg_node.get_interface(tg_port) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=intf) # load pcap file caught by out port time.sleep(1) - pkts = self.tester.load_tcpdump_sniff_packets(index) + pkts = self.tg_node.load_tcpdump_sniff_packets(index) pkts.save_pcapfile(filename=recPkt) # load_sniff_packets(index) # compare pcap file received by out port with scapy reference @@ -577,37 +577,37 @@ class TestPacketCapture(TestCase): msg = "tcpdump rx Receive Packet error: {0}".format(warning) self.verify(not warning, msg) # check send tx packet by port 1 - # send packet to dut and compare dpdk-pdump dump pcap + # send packet to SUT and compare dpdk-pdump dump pcap # with scapy pcap file - intf = self.tester.get_interface(self.tester.get_local_port(port_0)) + intf = self.tg_node.get_interface(self.tg_node.get_local_port(port_0)) # prepare to catch replay packet in out port recPkt = os.path.join("/tmp", "sniff_%s.pcap" % intf) if os.path.exists(recPkt): os.remove(recPkt) if pkt_type == "LLDP": - index = self.tester.tcpdump_sniff_packets( + index = self.tg_node.tcpdump_sniff_packets( intf=intf, count=1, lldp_forbid=False ) else: - index = self.tester.tcpdump_sniff_packets(intf=intf, count=1) - pkt = Packet(pkt_type=pkt_type) + index = self.tg_node.tcpdump_sniff_packets(intf=intf, count=1) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) if pkt_type == "VLAN_UDP": - pkt.config_layer("dot1q", {"vlan": 20}) - src_mac = self.tester.get_mac(self.tester.get_local_port(port_1)) - pkt.config_layer("ether", {"src": src_mac}) + scapy_pkt_builder.config_layer("dot1q", {"vlan": 20}) + src_mac = self.tg_node.get_mac(self.tg_node.get_local_port(port_1)) + scapy_pkt_builder.config_layer("ether", {"src": src_mac}) # save send packet in a pcap file refPkt = self.send_pcap % (pkt_type, "tx", number) if os.path.exists(refPkt): os.remove(refPkt) - pkt.save_pcapfile(filename=refPkt) - # pkt.pktgen.write_pcap(refPkt) + 
scapy_pkt_builder.save_pcapfile(filename=refPkt) + # scapy_pkt_builder.scapy_pkt_util.write_pcap(refPkt) # send out test packet - tester_port = self.tester.get_local_port(port_1) - intf = self.tester.get_interface(tester_port) - pkt.send_pkt(self.tester, tx_port=intf) + tg_port = self.tg_node.get_local_port(port_1) + intf = self.tg_node.get_interface(tg_port) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=intf) # load pcap file caught by out port time.sleep(1) - pkts = self.tester.load_tcpdump_sniff_packets(index) + pkts = self.tg_node.load_tcpdump_sniff_packets(index) pkts.save_pcapfile(filename=recPkt) # compare pcap file received by out port # with scapy reference packet pcap file @@ -674,17 +674,17 @@ class TestPacketCapture(TestCase): time.sleep(2) if self.dev_iface_flag: self.stop_tcpdump_iface() - if not self.is_dut_on_tester: - # copy rx pdump data from dut - if self.is_existed_on_crb(self.rx_pcap): + if not self.is_sut_on_tg: + # copy rx pdump data from SUT + if self.is_existed_on_node(self.rx_pcap): if os.path.exists(self.rx_pcap): os.remove(self.rx_pcap) - self.dut.session.copy_file_from(self.rx_pcap, self.rx_pcap) - # copy tx pdump data from dut - if self.is_existed_on_crb(self.tx_pcap): + self.sut_node.session.copy_file_from(self.rx_pcap, self.rx_pcap) + # copy tx pdump data from SUT + if self.is_existed_on_node(self.tx_pcap): if os.path.exists(self.tx_pcap): os.remove(self.tx_pcap) - self.dut.session.copy_file_from(self.tx_pcap, self.tx_pcap) + self.sut_node.session.copy_file_from(self.tx_pcap, self.tx_pcap) self.stop_dpdk_pdump() self.stop_testpmd() self.reset_ASLR() @@ -707,12 +707,12 @@ class TestPacketCapture(TestCase): def test_pdump_port(self): """ test different port options:: - *. port= - *. device_id= + *. port= + *. 
device_id= """ port_id = self.test_port0_id - port_name = self.dut.ports_info[port_id]["intf"] - port_pci = self.dut.ports_info[port_id]["pci"] + port_name = self.sut_node.ports_info[port_id]["intf"] + port_pci = self.sut_node.ports_info[port_id]["pci"] test_types = ["port"] options = self.generate_options(port_id, port_pci, port_name, test_types) self.packet_capture_test_options(options) @@ -725,8 +725,8 @@ class TestPacketCapture(TestCase): *. tx-dev=/xxx/pdump-tx.pcap """ port_id = self.test_port0_id - port_name = self.dut.ports_info[port_id]["intf"] - port_pci = self.dut.ports_info[port_id]["pci"] + port_name = self.sut_node.ports_info[port_id]["intf"] + port_pci = self.sut_node.ports_info[port_id]["pci"] test_types = ["dev-pcap"] options = self.generate_options(port_id, port_pci, port_name, test_types) self.packet_capture_test_options(options) @@ -734,14 +734,14 @@ class TestPacketCapture(TestCase): def test_pdump_dev_iface(self): """ test different dump options with interfaces as output:: - *. tx-dev=,rx-dev= - *. rx-dev= - *. tx-dev= + *. tx-dev=,rx-dev= + *. rx-dev= + *. 
tx-dev= """ - self.get_dut_iface_with_kernel_driver() + self.get_sut_iface_with_kernel_driver() port_id = self.test_port0_id - port_name = self.dut.ports_info[port_id]["intf"] - port_pci = self.dut.ports_info[port_id]["pci"] + port_name = self.sut_node.ports_info[port_id]["intf"] + port_pci = self.sut_node.ports_info[port_id]["pci"] test_types = ["dev-iface"] self.dev_iface_flag = True options = self.generate_options(port_id, port_pci, port_name, test_types) @@ -756,8 +756,8 @@ class TestPacketCapture(TestCase): queue=* """ port_id = self.test_port0_id - port_name = self.dut.ports_info[port_id]["intf"] - port_pci = self.dut.ports_info[port_id]["pci"] + port_name = self.sut_node.ports_info[port_id]["intf"] + port_pci = self.sut_node.ports_info[port_id]["pci"] test_types = ["queue"] options = self.generate_options(port_id, port_pci, port_name, test_types) self.packet_capture_test_options(options) @@ -767,8 +767,8 @@ class TestPacketCapture(TestCase): test ring size option, set value within 2^[1~27] """ port_id = self.test_port0_id - port_name = self.dut.ports_info[port_id]["intf"] - port_pci = self.dut.ports_info[port_id]["pci"] + port_name = self.sut_node.ports_info[port_id]["intf"] + port_pci = self.sut_node.ports_info[port_id]["pci"] test_types = ["ring_size"] options = self.generate_options(port_id, port_pci, port_name, test_types) self.packet_capture_test_options(options) @@ -780,8 +780,8 @@ class TestPacketCapture(TestCase): max value is decided by test platform memory size. 
""" port_id = self.test_port0_id - port_name = self.dut.ports_info[port_id]["intf"] - port_pci = self.dut.ports_info[port_id]["pci"] + port_name = self.sut_node.ports_info[port_id]["intf"] + port_pci = self.sut_node.ports_info[port_id]["pci"] test_types = ["mbuf_size"] options = self.generate_options(port_id, port_pci, port_name, test_types) @@ -792,8 +792,8 @@ class TestPacketCapture(TestCase): test total-num-mbufs option, set value within [1025~65535] """ port_id = self.test_port0_id - port_name = self.dut.ports_info[port_id]["intf"] - port_pci = self.dut.ports_info[port_id]["pci"] + port_name = self.sut_node.ports_info[port_id]["intf"] + port_pci = self.sut_node.ports_info[port_id]["pci"] test_types = ["total_num_mbufs"] options = self.generate_options(port_id, port_pci, port_name, test_types) self.packet_capture_test_options(options) @@ -806,26 +806,26 @@ class TestPacketCapture(TestCase): self.target == "x86_64-native-linuxapp-gcc", "only support x86_64-native-linuxapp-gcc", ) - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) == 2, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) == 2, "Insufficient ports for testing") self.verify_supported_nic() self.test_port0_id = 0 self.test_port1_id = 1 # used for save log self.pdump_log = os.sep.join(["/tmp", "pdumpLog"]) - if not self.is_existed_on_crb(self.pdump_log): + if not self.is_existed_on_node(self.pdump_log): cmd = "mkdir -p {0}".format(self.pdump_log) - self.dut.alt_session.send_expect(cmd, "# ") - if not self.is_dut_on_tester and not self.is_existed_on_crb( - self.pdump_log, crb="tester" + self.sut_node.alt_session.send_expect(cmd, "# ") + if not self.is_sut_on_tg and not self.is_existed_on_node( + self.pdump_log, node="tg" ): cmd = "mkdir -p {0}".format(self.pdump_log) - self.tester.alt_session.send_expect(cmd, "# ") + self.tg_node.alt_session.send_expect(cmd, "# ") # secondary process (dpdk-pdump) - 
self.dut_dpdk_pdump_dir = self.dut.apps_name["pdump"] - self.tool_name = self.dut_dpdk_pdump_dir.split("/")[-1] - self.session_ex = self.dut.new_session(self.tool_name) - self.dpdk_pdump = self.dut_dpdk_pdump_dir + " -v --file-prefix=test -- --pdump " + self.sut_dpdk_pdump_dir = self.sut_node.apps_name["pdump"] + self.tool_name = self.sut_dpdk_pdump_dir.split("/")[-1] + self.session_ex = self.sut_node.new_session(self.tool_name) + self.dpdk_pdump = self.sut_dpdk_pdump_dir + " -v --file-prefix=test -- --pdump " self.send_pcap = os.sep.join([self.pdump_log, "scapy_%s_%s_%d.pcap"]) self.rx_pcap = os.sep.join([self.pdump_log, "pdump-rx.pcap"]) self.tx_pcap = os.sep.join([self.pdump_log, "pdump-tx.pcap"]) @@ -837,7 +837,7 @@ class TestPacketCapture(TestCase): self.tx_packet_pos = 0 self.dev_iface_flag = False # primary process - self.testpmd = PmdOutput(self.dut) + self.testpmd = PmdOutput(self.sut_node) # False: reduce test items for regression testing, # shut off base test environment checking # True: make a full range testing @@ -859,8 +859,8 @@ class TestPacketCapture(TestCase): """ if not self.exit_flag: self.stop_dpdk_pdump() - self.dut.alt_session.send_expect("killall testpmd", "# ") - self.tester.alt_session.send_expect("killall tcpdump", "# ") + self.sut_node.alt_session.send_expect("killall testpmd", "# ") + self.tg_node.alt_session.send_expect("killall tcpdump", "# ") self.reset_ASLR() if self.dev_iface_flag: self.stop_tcpdump_iface() @@ -874,4 +874,4 @@ class TestPacketCapture(TestCase): self.reset_ASLR() self.session_ex.close() self.session_ex = None - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_packet_ordering.py b/tests/TestSuite_packet_ordering.py index 1e88e3bf..c0a82984 100644 --- a/tests/TestSuite_packet_ordering.py +++ b/tests/TestSuite_packet_ordering.py @@ -11,7 +11,7 @@ import os import time import framework.utils as utils -from framework.packet import Packet +from framework.scapy_packet_builder import 
ScapyPacketBuilder from framework.test_case import TestCase @@ -21,26 +21,26 @@ class TestPacketOrdering(TestCase): Executes the Packet Ordering prerequisites. Creates a simple scapy packet to be used later on the tests. It also compiles the example app. """ - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) global valports - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] # Verify that enough ports are available self.verify(len(valports) >= 1, "Insufficient ports for speed testing") - self.port = self.tester.get_local_port(valports[0]) + self.port = self.tg_node.get_local_port(valports[0]) # get socket and cores - self.socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores = self.dut.get_core_list("1S/4C/1T", socket=self.socket) - self.eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores = self.sut_node.get_core_list("1S/4C/1T", socket=self.socket) + self.eal_para = self.sut_node.create_eal_parameters(cores="1S/4C/1T") self.verify(self.cores is not None, "Insufficient cores for speed testing") self.core_mask = utils.create_mask(self.cores) self.port_mask = utils.create_mask(valports) # Builds the packet ordering example app and checks for errors. 
- # out = self.dut.send_expect("make -C examples/packet_ordering", "#") - out = self.dut.build_dpdk_apps("./examples/packet_ordering") + # out = self.sut_node.send_expect("make -C examples/packet_ordering", "#") + out = self.sut_node.build_dpdk_apps("./examples/packet_ordering") self.verify( "Error" not in out and "No such file" not in out, "Compilation error" ) @@ -53,21 +53,21 @@ class TestPacketOrdering(TestCase): def start_application(self): - app_name = self.dut.apps_name["packet_ordering"] + app_name = self.sut_node.apps_name["packet_ordering"] cmdline = app_name + "{0} -- -p {1}".format(self.eal_para, self.port_mask) # Executes the packet ordering example app. - self.dut.send_expect(cmdline, "REORDERAPP", 120) + self.sut_node.send_expect(cmdline, "REORDERAPP", 120) def remove_dhcp_from_revpackets(self, inst, timeout=3): - pkts = self.tester.load_tcpdump_sniff_packets(inst, timeout) + scapy_pkts = self.tg_node.load_tcpdump_sniff_packets(inst, timeout) i = 0 - while len(pkts) != 0 and i <= len(pkts) - 1: - if pkts[i].pktgen.pkt.haslayer("DHCP"): - pkts.remove(pkts[i]) + while len(scapy_pkts) != 0 and i <= len(scapy_pkts) - 1: + if scapy_pkts[i].scapy_pkt_util.pkt.haslayer("DHCP"): + scapy_pkts.remove(scapy_pkts[i]) i = i - 1 i = i + 1 - return pkts + return scapy_pkts def send_ordered_packet(self): """ @@ -75,22 +75,22 @@ class TestPacketOrdering(TestCase): compose the pcap file, each queue has same 5 tuple and diff load info """ - pkt = Packet() + scapy_pkt_builder = ScapyPacketBuilder() src_ip = "11.12.13.1" pay_load = "000001" packet_num = 1000 smac = "00:00:00:00:00:00" - rx_interface = self.tester.get_interface(self.port) + rx_interface = self.tg_node.get_interface(self.port) tx_interface = rx_interface for _port in valports: index = valports[_port] - dmac = self.dut.get_mac_address(index) + dmac = self.sut_node.get_mac_address(index) config_opt = [ ("ether", {"dst": dmac, "src": smac}), ("ipv4", {"src": src_ip, "dst": "11.12.1.1"}), ("udp", {"src": 123, 
"dst": 12}), ] - pkt.generate_random_pkts( + scapy_pkt_builder.generate_random_pkts( pktnum=packet_num, random_type=["UDP"], ip_increase=False, @@ -100,11 +100,11 @@ class TestPacketOrdering(TestCase): # config raw info in pkts for i in range(packet_num): payload = "0000%.3d" % (i + 1) - pkt.pktgen.pkts[i + packet_num * _port]["Raw"].load = payload + scapy_pkt_builder.scapy_pkt_util.pkts[i + packet_num * _port]["Raw"].load = payload filt = [{"layer": "ether", "config": {"src": "%s" % smac}}] - inst = self.tester.tcpdump_sniff_packets(rx_interface, filters=filt) - pkt.send_pkt(crb=self.tester, tx_port=tx_interface, timeout=300) + inst = self.tg_node.tcpdump_sniff_packets(rx_interface, filters=filt) + scapy_pkt_builder.send_pkt(node=self.tg_node, tx_port=tx_interface, timeout=300) self.pkts = self.remove_dhcp_from_revpackets(inst) def check_packet_order(self): @@ -138,13 +138,13 @@ class TestPacketOrdering(TestCase): self.send_ordered_packet() # check packet ordering self.check_packet_order() - self.dut.send_expect("^c", "#", 10) + self.sut_node.send_expect("^c", "#", 10) def tear_down(self): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_perf_virtio_user_loopback.py b/tests/TestSuite_perf_virtio_user_loopback.py index 457db559..6d1f933d 100644 --- a/tests/TestSuite_perf_virtio_user_loopback.py +++ b/tests/TestSuite_perf_virtio_user_loopback.py @@ -32,24 +32,24 @@ class TestPerfVirtioUserLoopback(TestCase): self.nb_ports = 1 self.nb_cores = 1 self.queue_number = 1 - cores_num = len(set([int(core["socket"]) for core in self.dut.cores])) + cores_num = len(set([int(core["socket"]) for core in self.sut_node.cores])) # set diff arg about mem_socket base on socket number self.socket_mem = ",".join(["1024"] * cores_num) self.core_config = "1S/4C/1T" self.verify( len(self.core_config) >= 4, "There has not enought cores to test this suite" ) - self.core_list = self.dut.get_core_list(self.core_config) + self.core_list = self.sut_node.get_core_list(self.core_config) self.core_list_user = self.core_list[0:2] self.core_list_host = self.core_list[2:4] - self.vhost = self.dut.new_session(suite="vhost") - self.virtio_user = self.dut.new_session(suite="virtio-user") - self.vhost_pmd = PmdOutput(self.dut, self.vhost) - self.virtio_user_pmd = PmdOutput(self.dut, self.virtio_user) + self.vhost = self.sut_node.new_session(suite="vhost") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") + self.vhost_pmd = PmdOutput(self.sut_node, self.vhost) + self.virtio_user_pmd = PmdOutput(self.sut_node, self.virtio_user) self.save_result_flag = True self.json_obj = dict() - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ @@ -57,7 +57,7 @@ class TestPerfVirtioUserLoopback(TestCase): It's more convenient to load suite configuration here than set_up_all in debug mode. 
""" - self.dut.send_expect("rm ./vhost-net*", "# ") + self.sut_node.send_expect("rm ./vhost-net*", "# ") # test parameters include: frames size, descriptor numbers self.test_parameters = self.get_suite_cfg()["test_parameters"] @@ -140,7 +140,7 @@ class TestPerfVirtioUserLoopback(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -197,8 +197,8 @@ class TestPerfVirtioUserLoopback(TestCase): """ close all session of vhost and vhost-user """ - self.dut.close_session(self.vhost) - self.dut.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost) + self.sut_node.close_session(self.virtio_user) def handle_expected(self): """ @@ -515,7 +515,7 @@ class TestPerfVirtioUserLoopback(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_pf_smoke.py b/tests/TestSuite_pf_smoke.py index 909c7e1b..adb41e3b 100644 --- a/tests/TestSuite_pf_smoke.py +++ b/tests/TestSuite_pf_smoke.py @@ -2,8 +2,8 @@ # Copyright(c) 2021 Intel Corporation # -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from .smoke_base import ( @@ -25,36 +25,36 @@ class TestPfSmoke(TestCase): """ # Based on h/w type, choose how many ports to use - self.smoke_dut_ports = self.dut.get_ports(self.nic) + self.smoke_sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.smoke_dut_ports) >= 1, "Insufficient ports") - self.smoke_tester_port = self.tester.get_local_port(self.smoke_dut_ports[0]) - self.smoke_tester_nic = self.tester.get_interface(self.smoke_tester_port) - self.smoke_tester_mac = self.tester.get_mac(self.smoke_dut_ports[0]) - self.smoke_dut_mac = 
self.dut.get_mac_address(self.smoke_dut_ports[0]) + self.verify(len(self.smoke_sut_ports) >= 1, "Insufficient ports") + self.smoke_tg_port = self.tg_node.get_local_port(self.smoke_sut_ports[0]) + self.smoke_tg_nic = self.tg_node.get_interface(self.smoke_tg_port) + self.smoke_tg_mac = self.tg_node.get_mac(self.smoke_sut_ports[0]) + self.smoke_sut_mac = self.sut_node.get_mac_address(self.smoke_sut_ports[0]) # Verify that enough core - self.cores = self.dut.get_core_list("1S/4C/1T") + self.cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(self.cores is not None, "Insufficient cores for speed testing") # init pkt - self.pkt = Packet() + self.scapy_pkt_builder = ScapyPacketBuilder() # set default app parameter - self.pmd_out = PmdOutput(self.dut) - self.ports = [self.dut.ports_info[self.smoke_dut_ports[0]]["pci"]] + self.pmd_out = PmdOutput(self.sut_node) + self.ports = [self.sut_node.ports_info[self.smoke_sut_ports[0]]["pci"]] self.test_func = SmokeTest(self) - self.check_session = self.dut.new_session(suite="pf_smoke_test") + self.check_session = self.sut_node.new_session(suite="pf_smoke_test") def set_up(self): """ Run before each test case. 
""" - # set tester mtu and testpmd parameter + # set TG mtu and testpmd parameter if self._suite_result.test_case == "test_pf_jumbo_frames": - self.tester.send_expect( - "ifconfig {} mtu {}".format(self.smoke_tester_nic, JUMBO_FRAME_MTU), + self.tg_node.send_expect( + "ifconfig {} mtu {}".format(self.smoke_tg_nic, JUMBO_FRAME_MTU), "# ", ) self.param = ( @@ -75,18 +75,18 @@ class TestPfSmoke(TestCase): self.pmd_out.start_testpmd(cores=self.cores, ports=self.ports, param=self.param) # set default param - self.dut.send_expect("set promisc all off", "testpmd> ") - self.pmd_out.wait_link_status_up(self.smoke_dut_ports[0]) + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.pmd_out.wait_link_status_up(self.smoke_sut_ports[0]) def test_pf_jumbo_frames(self): """ This case aims to test transmitting jumbo frame packet on testpmd with jumbo frame support. """ - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("set verbose 3", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.pmd_out.wait_link_status_up(self.smoke_dut_ports[0]) + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("set verbose 3", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.pmd_out.wait_link_status_up(self.smoke_sut_ports[0]) result = self.test_func.check_jumbo_frames() self.verify(result, "enable disable jumbo frames failed") @@ -94,10 +94,10 @@ class TestPfSmoke(TestCase): """ Check default rss function. 
""" - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.pmd_out.wait_link_status_up(self.smoke_dut_ports[0]) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.pmd_out.wait_link_status_up(self.smoke_sut_ports[0]) result = self.test_func.check_rss() self.verify(result, "enable disable rss failed") @@ -105,39 +105,39 @@ class TestPfSmoke(TestCase): """ Check dpdk queue configure. """ - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.pmd_out.wait_link_status_up(self.smoke_dut_ports[0]) + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.pmd_out.wait_link_status_up(self.smoke_sut_ports[0]) result = self.test_func.check_tx_rx_queue() self.verify(result, "check tx rx queue failed") def tear_down(self): self.pmd_out.execute_cmd("stop") - # set tester mtu to default value + # set TG mtu to default value if self._suite_result.test_case == "test_pf_jumbo_frames": - self.tester.send_expect( - "ifconfig {} mtu {}".format(self.smoke_tester_nic, DEFAULT_MTU_VALUE), + self.tg_node.send_expect( + "ifconfig {} mtu {}".format(self.smoke_tg_nic, DEFAULT_MTU_VALUE), "# ", ) # set dpdk queues to launch value if self._suite_result.test_case == "test_pf_tx_rx_queue": - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect( "port config all rxq {}".format(LAUNCH_QUEUE), "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( 
"port config all txq {}".format(LAUNCH_QUEUE), "testpmd> " ) - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("quit", "# ") - self.dut.kill_all() + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("quit", "# ") + self.sut_node.kill_all() def tear_down_all(self): if self.check_session: - self.dut.close_session(self.check_session) + self.sut_node.close_session(self.check_session) self.check_session = None - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_pipeline.py b/tests/TestSuite_pipeline.py index dd23cc97..bfed2b04 100644 --- a/tests/TestSuite_pipeline.py +++ b/tests/TestSuite_pipeline.py @@ -23,15 +23,14 @@ from scapy.sendrecv import sendp, sniff from scapy.utils import hexstr, rdpcap, wrpcap import framework.utils as utils -from framework.crb import Crb -from framework.dut import Dut from framework.exception import VerifyFailure -from framework.packet import Packet +from framework.node import Node from framework.pmd_output import PmdOutput -from framework.project_dpdk import DPDKdut +from framework.project_dpdk import DPDKSut from framework.settings import DRIVERS, HEADER_SIZE +from framework.sut_node import SutNode from framework.test_case import TestCase -from framework.virt_dut import VirtDut +from framework.virt_sut import VirtSut TIMESTAMP = re.compile(r"\d{2}\:\d{2}\:\d{2}\.\d{6}") PAYLOAD = re.compile(r"\t0x([0-9a-fA-F]+): ([0-9a-fA-F ]+)") @@ -97,7 +96,7 @@ class TestPipeline(TestCase): """ param = "" direct_param = r"(\s+)\[ (\S+) in\|out\|inout \]" - out = self.tester.send_expect("tcpdump -h", "# ", trim_whitespace=False) + out = self.tg_node.send_expect("tcpdump -h", "# ", trim_whitespace=False) for line in out.split("\n"): m = re.match(direct_param, line) if m: @@ -117,20 +116,20 @@ class TestPipeline(TestCase): Starts tcpdump in the background to sniff packets that received by interface. 
""" command = "rm -f /tmp/tcpdump_{0}.pcap".format(interface) - self.tester.send_expect(command, "#") + self.tg_node.send_expect(command, "#") command = "tcpdump -nn -e {0} -w /tmp/tcpdump_{1}.pcap -i {1} {2} 2>/tmp/tcpdump_{1}.out &".format( self.param_flow_dir, interface, filters ) - self.tester.send_expect(command, "# ") + self.tg_node.send_expect(command, "# ") def tcpdump_stop_sniff(self): """ Stops the tcpdump process running in the background. """ - self.tester.send_expect("killall tcpdump", "# ") + self.tg_node.send_expect("killall tcpdump", "# ") # For the [pid]+ Done tcpdump... message after killing the process sleep(1) - self.tester.send_expect('echo "Cleaning buffer"', "# ") + self.tg_node.send_expect('echo "Cleaning buffer"', "# ") sleep(1) def write_pcap_file(self, pcap_file, pkts): @@ -153,25 +152,25 @@ class TestPipeline(TestCase): Sent pkts that read from the pcap_file. Return the sniff pkts. """ - tx_port = self.tester.get_local_port(self.dut_ports[from_port]) - rx_port = self.tester.get_local_port(self.dut_ports[to_port]) + tx_port = self.tg_node.get_local_port(self.sut_ports[from_port]) + rx_port = self.tg_node.get_local_port(self.sut_ports[to_port]) - tx_interface = self.tester.get_interface(tx_port) - rx_interface = self.tester.get_interface(rx_port) + tx_interface = self.tg_node.get_interface(tx_port) + rx_interface = self.tg_node.get_interface(rx_port) - self.tester.send_expect("rm -f /tmp/*.txt /tmp/*.pcap /tmp/*.out", "# ") + self.tg_node.send_expect("rm -f /tmp/*.txt /tmp/*.pcap /tmp/*.out", "# ") self.tcpdump_start_sniff(rx_interface, filters) # Prepare the pkts to be sent - self.tester.scapy_foreground() - self.tester.send_expect( + self.tg_node.scapy_foreground() + self.tg_node.send_expect( "text2pcap -q {} /tmp/packet_tx.pcap".format("/tmp/" + in_pcap), "# " ) - self.tester.scapy_append('pkt = rdpcap("/tmp/packet_tx.pcap")') - self.tester.scapy_append('sendp(pkt, iface="{}", count=1)'.format(tx_interface)) - 
self.tester.scapy_execute() + self.tg_node.scapy_append('pkt = rdpcap("/tmp/packet_tx.pcap")') + self.tg_node.scapy_append('sendp(pkt, iface="{}", count=1)'.format(tx_interface)) + self.tg_node.scapy_execute() self.tcpdump_stop_sniff() - self.tester.send_expect( + self.tg_node.send_expect( "tcpdump -n -r /tmp/tcpdump_{}.pcap -xx > /tmp/packet_rx.txt".format( rx_interface ), @@ -180,59 +179,59 @@ class TestPipeline(TestCase): self.convert_tcpdump_to_text2pcap( "/tmp/packet_rx.txt", "/tmp/packet_rx_rcv.txt" ) - out = self.tester.send_command( + out = self.tg_node.send_command( "diff -sqw /tmp/packet_rx_rcv.txt {}".format("/tmp/" + out_pcap), timeout=0.5, ) if "are identical" not in out: - self.dut.send_expect("^C", "# ") + self.sut_node.send_expect("^C", "# ") self.verify(False, "Output pcap files mismatch error") def send_and_sniff_multiple( self, from_port, to_port, in_pcap, out_pcap, filters, rate=0 ): - self.tester.send_expect("rm -f /tmp/*.txt /tmp/*.pcap /tmp/*.out", "# ") + self.tg_node.send_expect("rm -f /tmp/*.txt /tmp/*.pcap /tmp/*.out", "# ") tx_count = len(from_port) rx_count = len(to_port) tx_port, rx_port, tx_inf, rx_inf = ([] for i in range(4)) for i in range(tx_count): - tx_port.append(self.tester.get_local_port(self.dut_ports[from_port[i]])) - tx_inf.append(self.tester.get_interface(tx_port[i])) + tx_port.append(self.tg_node.get_local_port(self.sut_ports[from_port[i]])) + tx_inf.append(self.tg_node.get_interface(tx_port[i])) for i in range(rx_count): - rx_port.append(self.tester.get_local_port(self.dut_ports[to_port[i]])) - rx_inf.append(self.tester.get_interface(rx_port[i])) + rx_port.append(self.tg_node.get_local_port(self.sut_ports[to_port[i]])) + rx_inf.append(self.tg_node.get_interface(rx_port[i])) self.tcpdump_start_sniff(rx_inf[i], filters[i]) - self.tester.scapy_foreground() + self.tg_node.scapy_foreground() for i in range(tx_count): - self.tester.send_expect( + self.tg_node.send_expect( "text2pcap -q {} /tmp/tx_{}.pcap".format( "/tmp/" + 
in_pcap[i], tx_inf[i] ), "# ", ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'pkt = rdpcap("/tmp/tx_{}.pcap")'.format(tx_inf[i]) ) if rate: - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp(pkt, iface="{}", count=1, inter=1./{})'.format( tx_inf[i], rate ) ) else: - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp(pkt, iface="{}", count=1)'.format(tx_inf[i]) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() self.tcpdump_stop_sniff() mismatch_count = 0 for i in range(rx_count): - self.tester.send_expect( + self.tg_node.send_expect( "tcpdump -n -r /tmp/tcpdump_{}.pcap -xx > /tmp/packet_rx.txt".format( rx_inf[i] ), @@ -244,21 +243,21 @@ class TestPipeline(TestCase): cmd = "diff -sqw /tmp/packet_rx_rcv_{}.txt {}".format( rx_inf[i], "/tmp/" + out_pcap[i] ) - out = self.tester.send_command(cmd, timeout=0.5) + out = self.tg_node.send_command(cmd, timeout=0.5) if "are identical" not in out: mismatch_count += 1 if mismatch_count: - self.dut.send_expect("^C", "# ") + self.sut_node.send_expect("^C", "# ") self.verify(False, "Output pcap files mismatch error") def send_scapy_pkts(self, from_port): - tx_port = self.tester.get_local_port(self.dut_ports[from_port]) - tx_interface = self.tester.get_interface(tx_port) + tx_port = self.tg_node.get_local_port(self.sut_ports[from_port]) + tx_interface = self.tg_node.get_interface(tx_port) - self.tester.send_expect("rm -f /tmp/*.txt /tmp/*.pcap /tmp/*.out", "# ") + self.tg_node.send_expect("rm -f /tmp/*.txt /tmp/*.pcap /tmp/*.out", "# ") pcap_file = "/tmp/packet_tx.pcap" - self.tester.scapy_foreground() + self.tg_node.scapy_foreground() pr = 0 for a in range(192, 255): for b in range(0, 255): @@ -266,7 +265,7 @@ class TestPipeline(TestCase): for d in range(0, 255): my_dst = "{}.{}.{}.{}".format(a, b, c, d) pkt = [ - Ether(dst=self.dut_p0_mac) + Ether(dst=self.sut_p0_mac) / IP(src="0.0.0.2", dst=my_dst) / TCP(sport=100, dport=200) / Raw(load="X" * 6) @@ -275,71 +274,71 @@ class 
TestPipeline(TestCase): pr += 1 if pr == 50: pr = 0 - self.tester.scapy_append( + self.tg_node.scapy_append( 'pkt = rdpcap("/tmp/packet_tx.pcap")' ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp(pkt, iface="{}", count=1)'.format(tx_interface) ) - self.tester.scapy_execute() - self.tester.send_expect( + self.tg_node.scapy_execute() + self.tg_node.send_expect( "rm -f /tmp/*.txt /tmp/*.pcap /tmp/*.out", "# " ) if pr: - self.tester.scapy_append('pkt = rdpcap("/tmp/packet_tx.pcap")') - self.tester.scapy_append( + self.tg_node.scapy_append('pkt = rdpcap("/tmp/packet_tx.pcap")') + self.tg_node.scapy_append( 'sendp(pkt, iface="{}", count=1)'.format(tx_interface) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def setup_env(self, port_nums, driver): """ This is to set up vf environment. The pf is bound to dpdk driver. """ - self.dut.send_expect("modprobe vfio-pci", "# ") + self.sut_node.send_expect("modprobe vfio-pci", "# ") if driver == "default": - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() # one PF generate one VF for port_num in range(port_nums): - self.dut.generate_sriov_vfs_by_port(self.dut_ports[port_num], 1, driver) + self.sut_node.generate_sriov_vfs_by_port(self.sut_ports[port_num], 1, driver) self.sriov_vfs_port.append( - self.dut.ports_info[self.dut_ports[port_num]]["vfs_port"] + self.sut_node.ports_info[self.sut_ports[port_num]]["vfs_port"] ) if driver == "default": - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf0_interface, self.vf0_mac), "# ", 3, ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf1_interface, self.vf1_mac), "# ", 3, ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf2_interface, self.vf2_mac), "# ", 3, ) - self.dut.send_expect( + 
self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf3_interface, self.vf3_mac), "# ", 3, ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % self.pf0_interface, "# ", 3 ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % self.pf1_interface, "# ", 3 ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % self.pf2_interface, "# ", 3 ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % self.pf3_interface, "# ", 3 ) @@ -359,69 +358,69 @@ class TestPipeline(TestCase): self.session_secondary.send_expect(cmd, "# ", 20) time.sleep(5) if driver == self.drivername: - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(5) for port_num in range(port_nums): - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[port_num]) + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[port_num]) def set_up_all(self): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports() + self.sut_ports = self.sut_node.get_ports() self.port_nums = 4 self.verify( - len(self.dut_ports) >= self.port_nums, + len(self.sut_ports) >= self.port_nums, "Insufficient ports for speed testing", ) - self.dut_p0_pci = self.dut.get_port_pci(self.dut_ports[0]) - self.dut_p1_pci = self.dut.get_port_pci(self.dut_ports[1]) - self.dut_p2_pci = self.dut.get_port_pci(self.dut_ports[2]) - self.dut_p3_pci = self.dut.get_port_pci(self.dut_ports[3]) + self.sut_p0_pci = self.sut_node.get_port_pci(self.sut_ports[0]) + self.sut_p1_pci = self.sut_node.get_port_pci(self.sut_ports[1]) + self.sut_p2_pci = self.sut_node.get_port_pci(self.sut_ports[2]) + self.sut_p3_pci = self.sut_node.get_port_pci(self.sut_ports[3]) - self.dut_p0_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.dut_p1_mac = self.dut.get_mac_address(self.dut_ports[1]) - self.dut_p2_mac = self.dut.get_mac_address(self.dut_ports[2]) - self.dut_p3_mac = self.dut.get_mac_address(self.dut_ports[3]) + self.sut_p0_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.sut_p1_mac = self.sut_node.get_mac_address(self.sut_ports[1]) + self.sut_p2_mac = self.sut_node.get_mac_address(self.sut_ports[2]) + self.sut_p3_mac = self.sut_node.get_mac_address(self.sut_ports[3]) - self.pf0_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf1_interface = self.dut.ports_info[self.dut_ports[1]]["intf"] - self.pf2_interface = self.dut.ports_info[self.dut_ports[2]]["intf"] - self.pf3_interface = self.dut.ports_info[self.dut_ports[3]]["intf"] + self.pf0_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf1_interface = self.sut_node.ports_info[self.sut_ports[1]]["intf"] + self.pf2_interface = self.sut_node.ports_info[self.sut_ports[2]]["intf"] + self.pf3_interface = self.sut_node.ports_info[self.sut_ports[3]]["intf"] self.vf0_mac = "00:11:22:33:44:55" self.vf1_mac = "00:11:22:33:44:56" self.vf2_mac = "00:11:22:33:44:57" self.vf3_mac = 
"00:11:22:33:44:58" - ports = [self.dut_p0_pci, self.dut_p1_pci, self.dut_p2_pci, self.dut_p3_pci] - self.eal_para = self.dut.create_eal_parameters( + ports = [self.sut_p0_pci, self.sut_p1_pci, self.sut_p2_pci, self.sut_p3_pci] + self.eal_para = self.sut_node.create_eal_parameters( cores=list(range(4)), ports=ports ) self.sriov_vfs_port = [] - self.session_secondary = self.dut.new_session() + self.session_secondary = self.sut_node.new_session() - out = self.dut.build_dpdk_apps("./examples/pipeline") + out = self.sut_node.build_dpdk_apps("./examples/pipeline") self.verify("Error" not in out, "Compilation error") - self.app_pipeline_path = self.dut.apps_name["pipeline"] - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.app_pipeline_path = self.sut_node.apps_name["pipeline"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.param_flow_dir = self.get_flow_direction_param_of_tcpdump() # update the ./dep/pipeline.tar.gz file PIPELINE_TAR_FILE = DEP_DIR + "pipeline.tar.gz" - self.tester.send_expect("rm -rf /tmp/pipeline", "# ") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf /tmp/pipeline", "# ") + self.tg_node.send_expect( "tar -zxf {} --directory /tmp".format(PIPELINE_TAR_FILE), "# ", 20 ) - # copy the ./dep/pipeline.tar.gz file to DUT - self.dut.send_expect("rm -rf /tmp/pipeline.tar.gz /tmp/pipeline", "# ", 20) + # copy the ./dep/pipeline.tar.gz file to SUT + self.sut_node.send_expect("rm -rf /tmp/pipeline.tar.gz /tmp/pipeline", "# ", 20) self.session_secondary.copy_file_to("dep/pipeline.tar.gz", "/tmp/") - self.dut.send_expect("tar -zxf /tmp/pipeline.tar.gz --directory /tmp", "# ", 20) + self.sut_node.send_expect("tar -zxf /tmp/pipeline.tar.gz --directory /tmp", "# ", 20) # update environment variable for the performance improvement - self.dut.send_expect( + self.sut_node.send_expect( "export RTE_INSTALL_DIR={}".format(DIR_RTE_INSTALL_DIR), "#" ) @@ -446,17 +445,17 @@ class TestPipeline(TestCase): # print('Rxd: ' + 
response) if "pipeline>" not in response: s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) self.verify(0, "CLI Response Error") else: return s except socket.error as err: print("socket connection failed with error %s" % (err)) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) self.verify(0, "Failed to connect to server") except socket.error as err: print("socket creation failed with error %s" % (err)) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) self.verify(0, "Failed to create socket") def socket_send_cmd(self, socket, cmd, expected_rsp): @@ -468,50 +467,50 @@ class TestPipeline(TestCase): # print('Rxd: ' + response) if expected_rsp not in response: socket.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) self.verify(0, "CLI Response Error") def run_dpdk_app(self, cli_file, exp_out="PIPELINE0 enable"): cmd = 'test -f {} && echo "File exists!"'.format(cli_file) - self.dut.send_expect(cmd, "File exists!", 1) + self.sut_node.send_expect(cmd, "File exists!", 1) try: - cmd = "sed -i -e 's/0000:00:04.0/%s/' {}".format(cli_file) % self.dut_p0_pci - self.dut.send_expect(cmd, "# ", 20) - cmd = "sed -i -e 's/0000:00:05.0/%s/' {}".format(cli_file) % self.dut_p1_pci - self.dut.send_expect(cmd, "# ", 20) - cmd = "sed -i -e 's/0000:00:06.0/%s/' {}".format(cli_file) % self.dut_p2_pci - self.dut.send_expect(cmd, "# ", 20) - cmd = "sed -i -e 's/0000:00:07.0/%s/' {}".format(cli_file) % self.dut_p3_pci - self.dut.send_expect(cmd, "# ", 20) + cmd = "sed -i -e 's/0000:00:04.0/%s/' {}".format(cli_file) % self.sut_p0_pci + self.sut_node.send_expect(cmd, "# ", 20) + cmd = "sed -i -e 's/0000:00:05.0/%s/' {}".format(cli_file) % self.sut_p1_pci + self.sut_node.send_expect(cmd, "# ", 20) + cmd = "sed -i -e 's/0000:00:06.0/%s/' {}".format(cli_file) % self.sut_p2_pci + self.sut_node.send_expect(cmd, "# ", 20) + cmd = "sed -i -e 
's/0000:00:07.0/%s/' {}".format(cli_file) % self.sut_p3_pci + self.sut_node.send_expect(cmd, "# ", 20) cmd = "{0} {1} -- -s {2}".format( self.app_pipeline_path, self.eal_para, cli_file ) - self.dut.send_expect(cmd, exp_out, 60) + self.sut_node.send_expect(cmd, exp_out, 60) except Exception: - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) self.verify(0, "ERROR in running DPDK application") def send_pkts(self, from_port, to_port, in_pcap): """ Send pkts read from the input pcap file. """ - tx_port = self.tester.get_local_port(self.dut_ports[from_port]) - rx_port = self.tester.get_local_port(self.dut_ports[to_port]) + tx_port = self.tg_node.get_local_port(self.sut_ports[from_port]) + rx_port = self.tg_node.get_local_port(self.sut_ports[to_port]) - tx_interface = self.tester.get_interface(tx_port) - rx_interface = self.tester.get_interface(rx_port) + tx_interface = self.tg_node.get_interface(tx_port) + rx_interface = self.tg_node.get_interface(rx_port) - self.tester.send_expect("rm -f /tmp/*.txt /tmp/*.pcap /tmp/*.out", "# ") + self.tg_node.send_expect("rm -f /tmp/*.txt /tmp/*.pcap /tmp/*.out", "# ") # Prepare the pkts to be sent - self.tester.scapy_foreground() - self.tester.send_expect( + self.tg_node.scapy_foreground() + self.tg_node.send_expect( "text2pcap -q {} /tmp/packet_tx.pcap".format("/tmp/" + in_pcap), "# " ) - self.tester.scapy_append('pkt = rdpcap("/tmp/packet_tx.pcap")') - self.tester.scapy_append('sendp(pkt, iface="{}", count=1)'.format(tx_interface)) - self.tester.scapy_execute() + self.tg_node.scapy_append('pkt = rdpcap("/tmp/packet_tx.pcap")') + self.tg_node.scapy_append('sendp(pkt, iface="{}", count=1)'.format(tx_interface)) + self.tg_node.scapy_execute() def test_rx_tx_001(self): @@ -524,7 +523,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + 
self.sut_node.send_expect("^C", "# ", 20) def test_extract_emit_001(self): @@ -537,7 +536,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_extract_emit_002(self): @@ -550,7 +549,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_extract_emit_003(self): @@ -563,7 +562,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_extract_emit_004(self): @@ -576,7 +575,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_extract_emit_005(self): @@ -589,7 +588,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_extract_emit_006(self): @@ -602,7 +601,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_extract_emit_007(self): @@ -615,7 +614,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# 
", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_extract_emit_008(self): @@ -628,7 +627,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_extract_emit_009(self): @@ -641,7 +640,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_and_001(self): @@ -654,7 +653,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_and_002(self): @@ -667,7 +666,7 @@ class TestPipeline(TestCase): self.send_and_sniff_pkts(1, 0, in_pcap, out_pcap, "tcp") self.send_and_sniff_pkts(2, 0, in_pcap, out_pcap, "tcp") self.send_and_sniff_pkts(3, 0, in_pcap, out_pcap, "tcp") - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_and_003(self): @@ -680,7 +679,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_and_004(self): @@ -693,7 +692,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_and_005(self): @@ -706,7 +705,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - 
self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_and_006(self): @@ -719,7 +718,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_and_007(self): @@ -732,7 +731,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_and_008(self): @@ -745,7 +744,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_or_001(self): @@ -758,7 +757,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_or_002(self): @@ -771,7 +770,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_or_003(self): @@ -784,7 +783,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_or_004(self): @@ -797,7 +796,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + 
self.sut_node.send_expect("^C", "# ", 20) def test_or_005(self): @@ -810,7 +809,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_or_006(self): @@ -823,7 +822,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_or_007(self): @@ -836,7 +835,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_or_008(self): @@ -849,7 +848,7 @@ class TestPipeline(TestCase): self.send_and_sniff_pkts(1, 1, in_pcap, out_pcap, "tcp") self.send_and_sniff_pkts(2, 3, in_pcap, out_pcap, "tcp") self.send_and_sniff_pkts(3, 3, in_pcap, out_pcap, "tcp") - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_xor_001(self): @@ -862,7 +861,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [1, 0, 3, 2] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_xor_002(self): @@ -875,7 +874,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_xor_003(self): @@ -888,7 +887,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + 
self.sut_node.send_expect("^C", "# ", 20) def test_xor_004(self): @@ -901,7 +900,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_xor_005(self): @@ -914,7 +913,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_xor_006(self): @@ -927,7 +926,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [1, 0, 3, 2] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_xor_007(self): @@ -940,7 +939,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_xor_008(self): @@ -953,7 +952,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_add_001(self): @@ -966,7 +965,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_add_002(self): @@ -979,7 +978,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def 
test_add_003(self): @@ -992,7 +991,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2] rx_port = [1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_add_004(self): @@ -1005,7 +1004,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_add_005(self): @@ -1018,7 +1017,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_add_006(self): @@ -1031,7 +1030,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_add_007(self): @@ -1044,7 +1043,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_add_008(self): @@ -1057,7 +1056,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shl_001(self): @@ -1070,7 +1069,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shl_002(self): @@ -1083,7 +1082,7 @@ class 
TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shl_003(self): @@ -1096,7 +1095,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shl_004(self): @@ -1109,7 +1108,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shl_005(self): @@ -1122,7 +1121,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shl_006(self): @@ -1135,7 +1134,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shl_007(self): @@ -1148,7 +1147,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shl_008(self): @@ -1161,7 +1160,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shr_001(self): @@ -1174,7 +1173,7 @@ class TestPipeline(TestCase): tx_port = [0, 
1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shr_002(self): @@ -1187,7 +1186,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shr_003(self): @@ -1200,7 +1199,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shr_004(self): @@ -1213,7 +1212,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shr_005(self): @@ -1226,7 +1225,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shr_006(self): @@ -1239,7 +1238,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shr_007(self): @@ -1252,7 +1251,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_shr_008(self): @@ -1265,7 +1264,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_sub_001(self): @@ -1278,7 +1277,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_sub_002(self): @@ -1291,7 +1290,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_sub_003(self): @@ -1304,7 +1303,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_sub_004(self): @@ -1317,7 +1316,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_sub_005(self): @@ -1330,7 +1329,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_sub_006(self): @@ -1343,7 +1342,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_sub_007(self): @@ -1356,7 +1355,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, 
rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_sub_008(self): @@ -1369,7 +1368,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_mov_001(self): @@ -1382,7 +1381,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_mov_002(self): @@ -1395,7 +1394,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_mov_003(self): @@ -1408,7 +1407,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_mov_004(self): @@ -1421,7 +1420,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_mov_005(self): @@ -1434,7 +1433,7 @@ class TestPipeline(TestCase): self.send_and_sniff_pkts(1, 0, in_pcap, out_pcap, "tcp") self.send_and_sniff_pkts(2, 0, in_pcap, out_pcap, "tcp") self.send_and_sniff_pkts(3, 0, in_pcap, out_pcap, "tcp") - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_mov_007(self): @@ -1447,7 +1446,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_mov_008(self): @@ -1460,7 +1459,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_table_001(self): @@ -1473,7 +1472,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_vxlan_001(self): @@ -1495,7 +1494,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_dma_001(self): @@ -1508,7 +1507,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_dma_002(self): @@ -1521,7 +1520,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_dma_003(self): @@ -1534,7 +1533,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_dma_004(self): @@ -1547,7 +1546,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_dma_005(self): @@ -1560,7 +1559,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_dma_006(self): @@ -1573,7 +1572,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_dma_007(self): @@ -1586,7 +1585,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_dma_008(self): @@ -1599,7 +1598,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_001(self): @@ -1612,7 +1611,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_002(self): @@ -1625,7 +1624,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_003(self): @@ -1638,7 +1637,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_004(self): @@ -1651,7 +1650,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_005(self): @@ -1664,7 +1663,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_006(self): @@ -1677,7 +1676,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_007(self): @@ -1690,7 +1689,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_008(self): @@ -1703,7 +1702,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_009(self): @@ -1716,7 +1715,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_010(self): @@ -1729,7 +1728,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_011(self): @@ -1742,7 +1741,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_012(self): @@ -1755,7 +1754,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_013(self): @@ -1768,7 +1767,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_014(self): @@ -1781,7 +1780,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_015(self): @@ -1794,7 +1793,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_016(self): @@ -1807,7 +1806,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_017(self): @@ -1820,7 +1819,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_018(self): @@ -1833,7 +1832,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_019(self): @@ -1846,7 +1845,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_020(self): @@ -1859,7 +1858,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_021(self): @@ -1872,7 +1871,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_022(self): @@ -1885,7 +1884,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_023(self): @@ -1898,7 +1897,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_024(self): @@ -1911,7 +1910,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_025(self): @@ -1924,7 +1923,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_026(self): @@ -1937,7 +1936,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_027(self): @@ -1950,7 +1949,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_028(self): @@ -1963,7 +1962,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_029(self): @@ -1976,7 +1975,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_030(self): @@ -1989,7 +1988,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_031(self): @@ -2002,7 +2001,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_032(self): @@ -2015,7 +2014,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_033(self): @@ -2028,7 +2027,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_034(self): @@ -2041,7 +2040,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_035(self): @@ -2054,7 +2053,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_036(self): @@ -2067,7 +2066,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_037(self): @@ -2080,7 +2079,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_038(self): @@ -2093,7 +2092,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_039(self): @@ -2106,7 +2105,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_040(self): @@ -2119,7 +2118,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_041(self): @@ -2132,7 +2131,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_042(self): @@ -2145,7 +2144,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_043(self): @@ -2158,7 +2157,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_044(self): @@ -2171,7 +2170,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_045(self): @@ -2184,7 +2183,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_046(self): @@ -2197,7 +2196,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_047(self): @@ -2210,7 +2209,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_048(self): @@ -2223,7 +2222,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_049(self): @@ -2236,7 +2235,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_050(self): @@ -2249,7 +2248,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_051(self): @@ -2262,7 +2261,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_052(self): @@ -2275,7 +2274,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_053(self): @@ -2288,7 +2287,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_054(self): @@ -2301,7 +2300,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_jump_055(self): @@ -2314,7 +2313,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_ckadd_001(self): @@ -2327,7 +2326,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_ckadd_009(self): @@ -2340,7 +2339,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_ckadd_010(self): @@ -2353,7 +2352,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_cksub_001(self): @@ -2366,7 +2365,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] 
self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_invalidate_001(self): @@ -2379,7 +2378,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_validate_001(self): @@ -2392,7 +2391,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_table_002(self): @@ -2521,7 +2520,7 @@ class TestPipeline(TestCase): self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_table_003(self): @@ -2655,7 +2654,7 @@ class TestPipeline(TestCase): self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_table_004(self): @@ -2668,7 +2667,7 @@ class TestPipeline(TestCase): tx_port = [0, 1, 2, 3] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_001(self): @@ -2694,7 +2693,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_002(self): @@ -2718,7 +2717,7 @@ class TestPipeline(TestCase): in_pcap = "pipeline/reg_002/pcap_files/in_1.txt" out_pcap = "pipeline/reg_002/pcap_files/out_1.txt" self.send_and_sniff_pkts(0, 0, in_pcap, out_pcap, "tcp") - self.dut.send_expect("^C", "# ", 20) + 
self.sut_node.send_expect("^C", "# ", 20) def test_reg_003(self): @@ -2742,7 +2741,7 @@ class TestPipeline(TestCase): in_pcap = "pipeline/reg_003/pcap_files/in_1.txt" out_pcap = "pipeline/reg_003/pcap_files/out_1.txt" self.send_and_sniff_pkts(0, 0, in_pcap, out_pcap, "tcp") - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_004(self): @@ -2766,7 +2765,7 @@ class TestPipeline(TestCase): in_pcap = "pipeline/reg_004/pcap_files/in_1.txt" out_pcap = "pipeline/reg_004/pcap_files/out_1.txt" self.send_and_sniff_pkts(0, 0, in_pcap, out_pcap, "tcp") - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_005(self): @@ -2790,7 +2789,7 @@ class TestPipeline(TestCase): in_pcap = "pipeline/reg_005/pcap_files/in_1.txt" out_pcap = "pipeline/reg_005/pcap_files/out_1.txt" self.send_and_sniff_pkts(0, 0, in_pcap, out_pcap, "tcp") - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_006(self): @@ -2824,7 +2823,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_007(self): @@ -2858,7 +2857,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_008(self): @@ -2892,7 +2891,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_009(self): @@ -2926,7 +2925,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_010(self): @@ -2945,7 +2944,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 
0xd1\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_010/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -2960,7 +2959,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x6\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_011(self): @@ -2979,7 +2978,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xd1\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_011/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -2994,7 +2993,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x6\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_012(self): @@ -3013,7 +3012,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xd1\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_012/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3028,7 +3027,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_013(self): @@ -3049,7 +3048,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0x06\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_013/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3066,7 +3065,7 @@ class TestPipeline(TestCase): 
self.socket_send_cmd(s, CLI_CMD, "0x9876543210987654\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_014(self): @@ -3085,7 +3084,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xd1\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_014/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3100,7 +3099,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x6\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_015(self): @@ -3119,7 +3118,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xd1\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_015/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3134,7 +3133,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x6\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_016(self): @@ -3155,7 +3154,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xf7\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_016/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3172,7 +3171,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_017(self): @@ -3193,7 +3192,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xf7\n" 
self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_017/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3210,7 +3209,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_018(self): @@ -3231,7 +3230,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xf7\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_018/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3248,7 +3247,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_019(self): @@ -3269,7 +3268,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xf7\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_019/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3286,7 +3285,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_020(self): @@ -3305,7 +3304,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xd1\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_020/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3320,7 +3319,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, 
CLI_CMD, "0x1234\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_021(self): @@ -3341,7 +3340,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xf7\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_021/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3358,7 +3357,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_022(self): @@ -3377,7 +3376,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0x7f\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_022/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3392,7 +3391,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x6\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_023(self): @@ -3413,7 +3412,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xf7\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_023/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3430,7 +3429,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_024(self): @@ -3451,7 +3450,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xf7\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # 
Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_024/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3468,7 +3467,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x12\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_025(self): @@ -3487,7 +3486,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regrd REG_ARR_1 0xd1\n" self.socket_send_cmd(s, CLI_CMD, "0x0\npipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_025/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3502,7 +3501,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x1234\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_026(self): @@ -3521,7 +3520,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xd1 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_026/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3536,7 +3535,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_027(self): @@ -3555,7 +3554,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xd1 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_027/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3570,7 +3569,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - 
self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_028(self): @@ -3589,7 +3588,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xd1 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_028/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3604,7 +3603,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_029(self): @@ -3623,7 +3622,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xd1 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_029/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3638,7 +3637,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_030(self): @@ -3657,7 +3656,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xd1 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_030/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3672,7 +3671,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_031(self): @@ -3691,7 +3690,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0x7f 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the 
register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_031/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3706,7 +3705,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_032(self): @@ -3727,7 +3726,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xf7 0x1f\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_032/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3744,7 +3743,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x25\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_033(self): @@ -3765,7 +3764,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xf7 0x1f\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_033/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3782,7 +3781,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x25\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_034(self): @@ -3803,7 +3802,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xf7 0x1f\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_034/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3820,7 +3819,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x25\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + 
self.sut_node.send_expect("^C", "# ", 20) def test_reg_035(self): @@ -3841,7 +3840,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xf7 0x1f\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_035/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3858,7 +3857,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x25\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_036(self): @@ -3879,7 +3878,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xf7 0x1f\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_036/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3896,7 +3895,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x25\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_037(self): @@ -3917,7 +3916,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xf7 0x1f\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_037/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3934,7 +3933,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x25\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_038(self): @@ -3953,7 +3952,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xd1 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update 
the register array in_pcap = "pipeline/reg_038/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -3968,7 +3967,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_039(self): @@ -3989,7 +3988,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xf7 0x1f\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_039/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -4006,7 +4005,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x25\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_040(self): @@ -4027,7 +4026,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xf7 0x1f\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_040/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -4044,7 +4043,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x25\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_041(self): @@ -4065,7 +4064,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xf7 0x1f\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_041/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -4082,7 +4081,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0x25\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def 
test_reg_042(self): @@ -4101,7 +4100,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xd1 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_042/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -4116,7 +4115,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_043(self): @@ -4135,7 +4134,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xd1 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_043/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -4150,7 +4149,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_044(self): @@ -4169,7 +4168,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0x7f 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = "pipeline/reg_044/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -4184,7 +4183,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_reg_045(self): @@ -4203,7 +4202,7 @@ class TestPipeline(TestCase): CLI_CMD = "pipeline PIPELINE0 regwr REG_ARR_1 0xd1 0xff2\n" self.socket_send_cmd(s, CLI_CMD, "pipeline> ") - # Send packet to DUT to update the register array + # Send packet to SUT to update the register array in_pcap = 
"pipeline/reg_045/pcap_files/in_1.txt" self.send_pkts(0, 0, in_pcap) @@ -4218,7 +4217,7 @@ class TestPipeline(TestCase): self.socket_send_cmd(s, CLI_CMD, "0xff8\npipeline> ") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_001(self): @@ -4270,7 +4269,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_001/pcap_files/out_43.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 1000) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_002(self): @@ -4292,7 +4291,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_002/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_003(self): @@ -4314,7 +4313,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_003/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_004(self): @@ -4336,7 +4335,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_004/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_005(self): @@ -4358,7 +4357,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_005/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_006(self): @@ 
-4380,7 +4379,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_006/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_007(self): @@ -4402,7 +4401,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_007/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_008(self): @@ -4424,7 +4423,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_008/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_009(self): @@ -4446,7 +4445,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_009/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_010(self): @@ -4468,7 +4467,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_010/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_011(self): @@ -4490,7 +4489,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_011/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + 
self.sut_node.send_expect("^C", "# ", 20) def test_met_012(self): @@ -4512,7 +4511,7 @@ class TestPipeline(TestCase): out_pcap_3 = "pipeline/met_012/pcap_files/out_23.txt" out_pcap = [out_pcap_1, out_pcap_2, out_pcap_3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 10) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_013(self): @@ -4528,7 +4527,7 @@ class TestPipeline(TestCase): tx_port = [0] rx_port = [0, 1, 2] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 1000) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_014(self): @@ -4544,7 +4543,7 @@ class TestPipeline(TestCase): tx_port = [0] rx_port = [0, 1, 2] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 1000) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_met_015(self): @@ -4560,72 +4559,72 @@ class TestPipeline(TestCase): tx_port = [0] rx_port = [0, 1, 2] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters, 1000) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_ring_port_001(self): cli_file = "/tmp/pipeline/ring_port_001/ring_port_001.cli" cmd = 'test -f {} && echo "File exists!"'.format(cli_file) - self.dut.send_expect(cmd, "File exists!", 10) + self.sut_node.send_expect(cmd, "File exists!", 10) try: - cmd = "sed -i -e 's/0000:00:04.0/%s/' {}".format(cli_file) % self.dut_p0_pci - self.dut.send_expect(cmd, "# ", 10) - cmd = "sed -i -e 's/0000:00:05.0/%s/' {}".format(cli_file) % self.dut_p1_pci - self.dut.send_expect(cmd, "# ", 10) - cmd = "sed -i -e 's/0000:00:06.0/%s/' {}".format(cli_file) % self.dut_p2_pci - self.dut.send_expect(cmd, "# ", 10) - cmd = "sed -i -e 's/0000:00:07.0/%s/' {}".format(cli_file) % self.dut_p3_pci - self.dut.send_expect(cmd, "# ", 10) - - ports = [self.dut_p0_pci, self.dut_p1_pci, self.dut_p2_pci, 
self.dut_p3_pci] + cmd = "sed -i -e 's/0000:00:04.0/%s/' {}".format(cli_file) % self.sut_p0_pci + self.sut_node.send_expect(cmd, "# ", 10) + cmd = "sed -i -e 's/0000:00:05.0/%s/' {}".format(cli_file) % self.sut_p1_pci + self.sut_node.send_expect(cmd, "# ", 10) + cmd = "sed -i -e 's/0000:00:06.0/%s/' {}".format(cli_file) % self.sut_p2_pci + self.sut_node.send_expect(cmd, "# ", 10) + cmd = "sed -i -e 's/0000:00:07.0/%s/' {}".format(cli_file) % self.sut_p3_pci + self.sut_node.send_expect(cmd, "# ", 10) + + ports = [self.sut_p0_pci, self.sut_p1_pci, self.sut_p2_pci, self.sut_p3_pci] vdevs = ["net_ring0"] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=list(range(4)), ports=ports, vdevs=vdevs ) cmd = "{0} {1} -- -s {2}".format( self.app_pipeline_path, eal_params, cli_file ) - self.dut.send_expect(cmd, "PIPELINE0 enable", 60) + self.sut_node.send_expect(cmd, "PIPELINE0 enable", 60) except Exception: - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) self.verify(0, "ERROR in running DPDK application") in_pcap = "pipeline/ring_port_001/pcap_files/in_1.txt" out_pcap = "pipeline/ring_port_001/pcap_files/out_1.txt" self.send_and_sniff_pkts(0, 1, in_pcap, out_pcap, "udp") - self.dut.send_expect("^C", "# ", 10) + self.sut_node.send_expect("^C", "# ", 10) def test_ring_port_002(self): cli_file = "/tmp/pipeline/ring_port_002/ring_port_002.cli" cmd = 'test -f {} && echo "File exists!"'.format(cli_file) - self.dut.send_expect(cmd, "File exists!", 10) + self.sut_node.send_expect(cmd, "File exists!", 10) try: - cmd = "sed -i -e 's/0000:00:04.0/%s/' {}".format(cli_file) % self.dut_p0_pci - self.dut.send_expect(cmd, "# ", 10) - cmd = "sed -i -e 's/0000:00:05.0/%s/' {}".format(cli_file) % self.dut_p1_pci - self.dut.send_expect(cmd, "# ", 10) - cmd = "sed -i -e 's/0000:00:06.0/%s/' {}".format(cli_file) % self.dut_p2_pci - self.dut.send_expect(cmd, "# ", 10) - cmd = "sed -i -e 's/0000:00:07.0/%s/' 
{}".format(cli_file) % self.dut_p3_pci - self.dut.send_expect(cmd, "# ", 10) - - ports = [self.dut_p0_pci, self.dut_p1_pci, self.dut_p2_pci, self.dut_p3_pci] - eal_params = self.dut.create_eal_parameters( + cmd = "sed -i -e 's/0000:00:04.0/%s/' {}".format(cli_file) % self.sut_p0_pci + self.sut_node.send_expect(cmd, "# ", 10) + cmd = "sed -i -e 's/0000:00:05.0/%s/' {}".format(cli_file) % self.sut_p1_pci + self.sut_node.send_expect(cmd, "# ", 10) + cmd = "sed -i -e 's/0000:00:06.0/%s/' {}".format(cli_file) % self.sut_p2_pci + self.sut_node.send_expect(cmd, "# ", 10) + cmd = "sed -i -e 's/0000:00:07.0/%s/' {}".format(cli_file) % self.sut_p3_pci + self.sut_node.send_expect(cmd, "# ", 10) + + ports = [self.sut_p0_pci, self.sut_p1_pci, self.sut_p2_pci, self.sut_p3_pci] + eal_params = self.sut_node.create_eal_parameters( cores=list(range(4)), ports=ports ) cmd = "{0} {1} -- -s {2}".format( self.app_pipeline_path, eal_params, cli_file ) - self.dut.send_expect(cmd, "PIPELINE0 enable", 60) + self.sut_node.send_expect(cmd, "PIPELINE0 enable", 60) except Exception: - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) self.verify(0, "ERROR in running DPDK application") in_pcap = "pipeline/ring_port_002/pcap_files/in_1.txt" out_pcap = "pipeline/ring_port_002/pcap_files/out_1.txt" self.send_and_sniff_pkts(0, 0, in_pcap, out_pcap, "tcp") - self.dut.send_expect("^C", "# ", 10) + self.sut_node.send_expect("^C", "# ", 10) def test_u100_001(self): @@ -4666,7 +4665,7 @@ class TestPipeline(TestCase): out_pcap = [base_dir + s for s in out_pcap] filters = ["igmp"] * 4 self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_u100_002(self): @@ -4715,7 +4714,7 @@ class TestPipeline(TestCase): out_pcap = [base_dir + s for s in out_pcap] filters = ["tcp"] * 4 self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - 
self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_u100_003(self): @@ -4764,7 +4763,7 @@ class TestPipeline(TestCase): out_pcap = [base_dir + s for s in out_pcap] filters = ["tcp"] * 4 self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_lpm_001(self): @@ -4829,7 +4828,7 @@ class TestPipeline(TestCase): self.send_and_sniff_pkts(1, 0, in_pcap, out_pcap, "tcp") s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_lpm_002(self): @@ -4874,7 +4873,7 @@ class TestPipeline(TestCase): self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_lpm_003(self): @@ -4898,7 +4897,7 @@ class TestPipeline(TestCase): tx_port = [0] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_lpm_004(self): @@ -4919,7 +4918,7 @@ class TestPipeline(TestCase): tx_port = [0] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_selector_001(self): @@ -5058,7 +5057,7 @@ class TestPipeline(TestCase): self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_selector_002(self): @@ -5075,7 +5074,7 @@ class TestPipeline(TestCase): tx_port = [0] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_scapy_pkt_gen(self): @@ -5097,7 +5096,7 @@ class 
TestPipeline(TestCase): tx_port = [0] rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_varbit_001(self): @@ -5107,7 +5106,7 @@ class TestPipeline(TestCase): in_pcap = "pipeline/varbit_001/pcap_files/in_1.txt" out_pcap = "pipeline/varbit_001/pcap_files/out_1.txt" self.send_and_sniff_pkts(0, 1, in_pcap, out_pcap, "tcp") - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_learner_001(self): @@ -5138,7 +5137,7 @@ class TestPipeline(TestCase): tx_port = [3] rx_port = [3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_learner_002(self): @@ -5169,7 +5168,7 @@ class TestPipeline(TestCase): tx_port = [3] rx_port = [3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_annotation_001(self): @@ -5183,7 +5182,7 @@ class TestPipeline(TestCase): rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_annotation_002(self): @@ -5197,7 +5196,7 @@ class TestPipeline(TestCase): rx_port = [0, 1, 2, 3] self.send_and_sniff_multiple(tx_port, rx_port, in_pcap, out_pcap, filters) - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_annotation_003(self): @@ -5214,7 +5213,7 @@ class TestPipeline(TestCase): ) s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_annotation_004(self): @@ -5239,7 +5238,7 @@ class TestPipeline(TestCase): ) s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def test_annotation_005(self): @@ 
-5256,7 +5255,7 @@ class TestPipeline(TestCase): ) s.close() - self.dut.send_expect("^C", "# ", 20) + self.sut_node.send_expect("^C", "# ", 20) def tear_down(self): """ @@ -5268,5 +5267,5 @@ class TestPipeline(TestCase): """ Run after each test suite. """ - self.dut.close_session(self.session_secondary) - self.dut.kill_all() + self.sut_node.close_session(self.session_secondary) + self.sut_node.kill_all() diff --git a/tests/TestSuite_pmd.py b/tests/TestSuite_pmd.py index 5ffdc58c..9f313ddf 100644 --- a/tests/TestSuite_pmd.py +++ b/tests/TestSuite_pmd.py @@ -15,10 +15,10 @@ from time import sleep import framework.utils as utils import nics.perf_report as perf_report -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import FOLDERS, HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from nics.system_info import SystemInfo @@ -46,27 +46,27 @@ class TestPmd(TestCase): self.blocklist = "" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports() - if self.dut.get_os_type() == "linux": - # Get dut system information - port_num = self.dut_ports[0] - pci_device_id = self.dut.ports_info[port_num]["pci"] - ori_driver = self.dut.ports_info[port_num]["port"].get_nic_driver() - self.dut.ports_info[port_num]["port"].bind_driver() - - sut = SystemInfo(self.dut, pci_device_id) + self.sut_ports = self.sut_node.get_ports() + if self.sut_node.get_os_type() == "linux": + # Get SUT system information + port_num = self.sut_ports[0] + pci_device_id = self.sut_node.ports_info[port_num]["pci"] + ori_driver = self.sut_node.ports_info[port_num]["port"].get_nic_driver() + self.sut_node.ports_info[port_num]["port"].bind_driver() + + sut = SystemInfo(self.sut_node, pci_device_id) if self.nic not in ["cavium_a063", "cavium_a064"]: self.system_info = sut.get_system_info() self.nic_info = sut.get_nic_info() - 
self.dut.ports_info[port_num]["port"].bind_driver(ori_driver) + self.sut_node.ports_info[port_num]["port"].bind_driver(ori_driver) ###### self.headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["tcp"] - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) # get dts output path if self.logger.log_path.startswith(os.sep): @@ -75,7 +75,7 @@ class TestPmd(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -90,11 +90,11 @@ class TestPmd(TestCase): self.perf_results["header"] = [] self.perf_results["data"] = [] - if len(self.dut_ports) >= 4: + if len(self.sut_ports) >= 4: self.pmd_performance_4ports() else: self.verify( - len(self.dut_ports) >= 2, + len(self.sut_ports) >= 2, "Insufficient ports for 2 ports performance test", ) self.pmd_performance_2ports() @@ -118,7 +118,7 @@ class TestPmd(TestCase): self.verify(html_msg is not None, "generate html report error") subject = "Single core performance test with %d ports %s -- %s" % ( - len(self.dut_ports), + len(self.sut_ports), self.nic, datetime.now().strftime("%Y-%m-%d %H:%M"), ) @@ -131,35 +131,35 @@ class TestPmd(TestCase): """ PMD Performance Benchmarking with 4 ports. 
""" - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] pcap = os.sep.join([self.output_path, "test.pcap"]) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), pcap, ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[2]), - self.tester.get_local_port(self.dut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), pcap, ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), pcap, ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[3]), - self.tester.get_local_port(self.dut_ports[2]), + self.tg_node.get_local_port(self.sut_ports[3]), + self.tg_node.get_local_port(self.sut_ports[2]), pcap, ) ) @@ -168,7 +168,7 @@ class TestPmd(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list(core_config, socket=self.ports_socket) + core_list = self.sut_node.get_core_list(core_config, socket=self.ports_socket) if len(core_list) > 4: queues = len(core_list) / 4 @@ -176,7 +176,7 @@ class TestPmd(TestCase): queues = 1 core_mask = utils.create_mask(core_list) - port_mask = utils.create_mask(self.dut.get_ports()) + port_mask = utils.create_mask(self.sut_node.get_ports()) self.pmdout.start_testpmd( core_config, @@ -191,31 +191,31 @@ class TestPmd(TestCase): self.logger.info(info) self.rst_report(command_line + "\n\n", frame=True, annex=True) - # self.dut.send_expect("set fwd mac", "testpmd> ", 100) - self.dut.send_expect("start", "testpmd> ", 100) + # 
self.sut_node.send_expect("set fwd mac", "testpmd> ", 100) + self.sut_node.send_expect("start", "testpmd> ", 100) for frame_size in self.frame_sizes: wirespeed = self.wirespeed(self.nic, frame_size, 4) # create pcap file self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() vm_config = self.set_fields() # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, vm_config, self.tester.pktgen + tgen_input, 100, vm_config, self.tg_node.perf_tg ) traffic_opt = { "duration": 60, } - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) @@ -224,8 +224,8 @@ class TestPmd(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) sleep(5) for n in range(len(self.test_cycles)): @@ -255,22 +255,22 @@ class TestPmd(TestCase): PMD Performance Benchmarking with 2 ports. 
""" - all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) # prepare traffic generator input tgen_input = [] pcap = os.sep.join([self.output_path, "test.pcap"]) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), pcap, ) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[1]), - self.tester.get_local_port(self.dut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), pcap, ) ) @@ -279,7 +279,7 @@ class TestPmd(TestCase): for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - core_list = self.dut.get_core_list(core_config, socket=self.ports_socket) + core_list = self.sut_node.get_core_list(core_config, socket=self.ports_socket) if len(core_list) > 2: queues = len(core_list) / 2 @@ -287,7 +287,7 @@ class TestPmd(TestCase): queues = 1 core_mask = utils.create_mask(core_list) - port_mask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) + port_mask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) # self.pmdout.start_testpmd("all", "--coremask=%s --rxq=%d --txq=%d --portmask=%s" % (core_mask, queues, queues, port_mask)) self.pmdout.start_testpmd( @@ -303,7 +303,7 @@ class TestPmd(TestCase): self.rst_report(info, annex=True) self.rst_report(command_line + "\n\n", frame=True, annex=True) - self.dut.send_expect("start", "testpmd> ", 100) + self.sut_node.send_expect("start", "testpmd> ", 100) for frame_size in self.frame_sizes: wirespeed = self.wirespeed(self.nic, frame_size, 2) @@ -311,25 +311,25 @@ class TestPmd(TestCase): # create pcap file self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", 
[Ether(src="52:00:00:00:00:00")/IP(src="1.2.3.4",dst="1.1.1.1")/TCP()/("X"*%d)])' % (pcap, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # run traffic generator vm_config = self.set_fields() # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, vm_config, self.tester.pktgen + tgen_input, 100, vm_config, self.tg_node.perf_tg ) traffic_opt = { "duration": 60, } - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) @@ -338,8 +338,8 @@ class TestPmd(TestCase): test_cycle["Mpps"][frame_size] = float("%.3f" % pps) test_cycle["pct"][frame_size] = float("%.3f" % pct) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) sleep(5) for n in range(len(self.test_cycles)): @@ -367,9 +367,9 @@ class TestPmd(TestCase): Packet forwarding checking test """ - self.dut.kill_all() + self.sut_node.kill_all() - port_mask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) + port_mask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) for rxfreet_value in self.rxfreet_values: @@ -379,8 +379,8 @@ class TestPmd(TestCase): % (port_mask, rxfreet_value), socket=self.ports_socket, ) - self.dut.send_expect("set fwd csum", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd csum", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.send_packet(self.frame_sizes[0], checksum_test=True) @@ -393,7 +393,7 @@ class TestPmd(TestCase): % (rxfreet_value, l4csum_error[1]), ) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) sleep(5) def test_packet_checking(self): @@ 
-401,19 +401,19 @@ class TestPmd(TestCase): Packet forwarding checking test """ - self.dut.kill_all() + self.sut_node.kill_all() - port_mask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) + port_mask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) self.pmdout.start_testpmd( "1S/2C/1T", "--portmask=%s" % port_mask, socket=self.ports_socket ) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") for size in self.frame_sizes: self.send_packet(size) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) sleep(5) def test_packet_checking_scalar_mode(self): @@ -421,14 +421,14 @@ class TestPmd(TestCase): Packet forwarding checking test """ - self.dut.kill_all() + self.sut_node.kill_all() - port_mask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) + port_mask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) eal_opts = "" - for port in self.dut_ports: + for port in self.sut_ports: eal_opts += "-a %s,scalar_enable=1 " % ( - self.dut.get_port_pci(self.dut_ports[port]) + self.sut_node.get_port_pci(self.sut_ports[port]) ) self.pmdout.start_testpmd( @@ -437,12 +437,12 @@ class TestPmd(TestCase): eal_param=eal_opts, socket=self.ports_socket, ) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") for size in self.frame_sizes: self.send_packet(size, scalar_test=True) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) sleep(5) def stop_and_get_l4csum_errors(self): @@ -450,7 +450,7 @@ class TestPmd(TestCase): Stop forwarding and get Bad-l4csum number from stop statistic """ - out = self.dut.send_expect("stop", "testpmd> ") + out = self.sut_node.send_expect("stop", "testpmd> ") result_scanner = 
r"Bad-l4csum: ([0-9]+) \s*" scanner = re.compile(result_scanner, re.DOTALL) m = scanner.findall(out) @@ -470,19 +470,19 @@ class TestPmd(TestCase): Send 1 packet to portid """ - port0_stats = self.get_stats(self.dut_ports[0]) + port0_stats = self.get_stats(self.sut_ports[0]) gp0tx_pkts, gp0tx_bytes = [port0_stats["TX-packets"], port0_stats["TX-bytes"]] - port1_stats = self.get_stats(self.dut_ports[1]) + port1_stats = self.get_stats(self.sut_ports[1]) gp1rx_pkts, gp1rx_err, gp1rx_bytes = [ port1_stats["RX-packets"], port1_stats["RX-errors"], port1_stats["RX-bytes"], ] - interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[1]) + interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[1]) ) - mac = self.dut.get_mac_address(self.dut_ports[1]) + mac = self.sut_node.get_mac_address(self.sut_ports[1]) load_size = frame_size - HEADER_SIZE["eth"] padding = ( @@ -497,19 +497,19 @@ class TestPmd(TestCase): pkt_count = 1 else: pkt_count = 4 - self.tester.scapy_foreground() - self.tester.scapy_append('nutmac="%s"' % mac) - self.tester.scapy_append( + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('nutmac="%s"' % mac) + self.tg_node.scapy_append( 'sendp([Ether(dst=nutmac, src="52:00:00:00:00:00")/IP(len=%s)/UDP(%s)/Raw(load="\x50"*%s)], iface="%s", count=%s)' % (load_size, checksum, padding, interface, pkt_count) ) - out = self.tester.scapy_execute() + out = self.tg_node.scapy_execute() time.sleep(0.5) - port0_stats = self.get_stats(self.dut_ports[0]) + port0_stats = self.get_stats(self.sut_ports[0]) p0tx_pkts, p0tx_bytes = [port0_stats["TX-packets"], port0_stats["TX-bytes"]] - port1_stats = self.get_stats(self.dut_ports[1]) + port1_stats = self.get_stats(self.sut_ports[1]) p1rx_pkts, p1rx_err, p1rx_bytes = [ port1_stats["RX-packets"], port1_stats["RX-errors"], @@ -563,4 +563,4 @@ class TestPmd(TestCase): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_pmd_bonded.py b/tests/TestSuite_pmd_bonded.py index 97f76e93..f1650f75 100644 --- a/tests/TestSuite_pmd_bonded.py +++ b/tests/TestSuite_pmd_bonded.py @@ -58,7 +58,7 @@ class TestPmdBonded(TestCase): Get packets number from port statistic """ - out = self.dut.send_expect("show port stats %d" % portid, "testpmd> ") + out = self.sut_node.send_expect("show port stats %d" % portid, "testpmd> ") if rx_tx == "rx": result_scanner = ( @@ -106,16 +106,16 @@ class TestPmdBonded(TestCase): udp = {} try: - dut_dest_port = self.dut_ports[dest_port] + sut_dest_port = self.sut_ports[dest_port] except Exception as e: - dut_dest_port = dest_port + sut_dest_port = dest_port if not ether_ip.get("ether"): - ether["dest_mac"] = self.dut.get_mac_address(dut_dest_port) + ether["dest_mac"] = self.sut_node.get_mac_address(sut_dest_port) ether["src_mac"] = "52:00:00:00:00:00" else: if not ether_ip["ether"].get("dest_mac"): - ether["dest_mac"] = self.dut.get_mac_address(dut_dest_port) + ether["dest_mac"] = self.sut_node.get_mac_address(sut_dest_port) else: ether["dest_mac"] = ether_ip["ether"]["dest_mac"] if not ether_ip["ether"].get("src_mac"): @@ -212,10 +212,10 @@ class TestPmdBonded(TestCase): if not src_port: gp0rx_pkts, gp0rx_err, gp0rx_bytes = [ - int(_) for _ in self.get_stats(self.dut_ports[dest_port], "rx") + int(_) for _ in self.get_stats(self.sut_ports[dest_port], "rx") ] - itf = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[dest_port]) + itf = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[dest_port]) ) else: gp0rx_pkts, gp0rx_err, gp0rx_bytes = [ @@ -230,30 +230,30 @@ class TestPmdBonded(TestCase): start = time.time() while True: - self.tester.scapy_foreground() - self.tester.scapy_append('nutmac="%s"' % ret_ether_ip["ether"]["dest_mac"]) - self.tester.scapy_append('srcmac="%s"' % ret_ether_ip["ether"]["src_mac"]) + self.tg_node.scapy_foreground() 
+ self.tg_node.scapy_append('nutmac="%s"' % ret_ether_ip["ether"]["dest_mac"]) + self.tg_node.scapy_append('srcmac="%s"' % ret_ether_ip["ether"]["src_mac"]) if ether_ip.get("dot1q"): - self.tester.scapy_append("vlanvalue=%d" % ret_ether_ip["dot1q"]["vlan"]) - self.tester.scapy_append('destip="%s"' % ret_ether_ip["ip"]["dest_ip"]) - self.tester.scapy_append('srcip="%s"' % ret_ether_ip["ip"]["src_ip"]) - self.tester.scapy_append("destport=%d" % ret_ether_ip["udp"]["dest_port"]) - self.tester.scapy_append("srcport=%d" % ret_ether_ip["udp"]["src_port"]) + self.tg_node.scapy_append("vlanvalue=%d" % ret_ether_ip["dot1q"]["vlan"]) + self.tg_node.scapy_append('destip="%s"' % ret_ether_ip["ip"]["dest_ip"]) + self.tg_node.scapy_append('srcip="%s"' % ret_ether_ip["ip"]["src_ip"]) + self.tg_node.scapy_append("destport=%d" % ret_ether_ip["udp"]["dest_port"]) + self.tg_node.scapy_append("srcport=%d" % ret_ether_ip["udp"]["src_port"]) if not ret_ether_ip.get("dot1q"): - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(dst=nutmac, src=srcmac)/IP(dst=destip, src=srcip, len=%s)/\ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' % (pktlen, padding, itf, count) ) else: - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(dst=nutmac, src=srcmac)/Dot1Q(vlan=vlanvalue)/IP(dst=destip, src=srcip, len=%s)/\ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' % (pktlen, padding, itf, count) ) - self.tester.scapy_execute(timeout=180) + self.tg_node.scapy_execute(timeout=180) loop += 1 now = time.time() @@ -263,7 +263,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' if not src_port: p0rx_pkts, p0rx_err, p0rx_bytes = [ - int(_) for _ in self.get_stats(self.dut_ports[dest_port], "rx") + int(_) for _ in self.get_stats(self.sut_ports[dest_port], "rx") ] else: p0rx_pkts, p0rx_err, p0rx_bytes = [ @@ -288,7 +288,7 @@ UDP(sport=srcport, 
dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' Just enter blank for the prompt 'testpmd> ' """ time.sleep(2) - self.dut.send_expect(" ", "testpmd> ") + self.sut_node.send_expect(" ", "testpmd> ") def dummy_timeout(func): """ @@ -311,7 +311,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ Encapsulate private expect function because multiple threads printing issue. """ - return self.dut.send_expect(cmds, expected, timeout, alt_session) + return self.sut_node.send_expect(cmds, expected, timeout, alt_session) def get_value_from_str(self, key_str, regx_str, string): """ @@ -329,7 +329,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ Get the detail info from the output of pmd cmd 'show port info '. """ - out = self.dut.send_expect("show port info %d" % port, "testpmd> ") + out = self.sut_node.send_expect("show port info %d" % port, "testpmd> ") find_value = self.get_value_from_str(key_str, regx_str, out) return find_value @@ -405,7 +405,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ Get info by executing the command "show bonding config". """ - out = self.dut.send_expect("show bonding config %d" % bond_port, "testpmd> ") + out = self.sut_node.send_expect("show bonding config %d" % bond_port, "testpmd> ") find_value = self.get_value_from_str(key_str, regx_str, out) return find_value @@ -461,7 +461,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ Create a bonding device with the parameters you specified. 
""" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "create bonded device %d %d" % (mode, socket), "testpmd> " ) self.verify( @@ -474,7 +474,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' bond_port = int(bond_port) if verify_detail: - out = self.dut.send_expect( + out = self.sut_node.send_expect( "show bonding config %d" % bond_port, "testpmd> " ) self.verify( @@ -493,7 +493,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' "Primary display error when create bonded device", ) - out = self.dut.send_expect("show port info %d" % bond_port, "testpmd> ") + out = self.sut_node.send_expect("show port info %d" % bond_port, "testpmd> ") self.verify( "Connect to socket: %d" % socket in out, "Bonding port connect socket error", @@ -551,7 +551,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' if len(slave_port) <= 0: utils.RED("No port exist when remove slave from bonded device") for slave_id in slave_port: - self.dut.send_expect( + self.sut_node.send_expect( "remove bonding slave %d %d" % (int(slave_id), bond_port), "testpmd> " ) out = self.get_info_from_bond_config("Slaves: \[", "\d*( \d*)*", bond_port) @@ -582,7 +582,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ Set the primary slave for the bonding device. """ - self.dut.send_expect( + self.sut_node.send_expect( "set bonding primary %d %d" % (slave_port, bond_port), "testpmd> " ) out = self.get_info_from_bond_config("Primary: \[", "\d*", bond_port) @@ -598,7 +598,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ Set the mode for the bonding device. 
""" - self.dut.send_expect("set bonding mode %d %d" % (mode, bond_port), "testpmd> ") + self.sut_node.send_expect("set bonding mode %d %d" % (mode, bond_port), "testpmd> ") mode_value = self.get_bond_mode(bond_port) self.verify(str(mode) in mode_value, "Set bonding mode failed") @@ -606,7 +606,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ Set the MAC for the bonding device. """ - self.dut.send_expect( + self.sut_node.send_expect( "set bonding mac_addr %s %s" % (bond_port, mac), "testpmd> " ) new_mac = self.get_port_mac(bond_port) @@ -616,7 +616,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ Set the balance transmit policy for the bonding device. """ - self.dut.send_expect( + self.sut_node.send_expect( "set bonding balance_xmit_policy %d %s" % (bond_port, policy), "testpmd> " ) new_policy = self.get_bond_balance_policy(bond_port) @@ -645,12 +645,12 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' pkt_orig = self.get_all_stats(unbound_port, "tx", bond_port, **slaves) for slave in slaves["active"]: temp_count = self.send_packet( - self.dut_ports[slave], False, FRAME_SIZE_64, pkt_count + self.sut_ports[slave], False, FRAME_SIZE_64, pkt_count ) summary += temp_count for slave in slaves["inactive"]: self.send_packet( - self.dut_ports[slave], False, FRAME_SIZE_64, pkt_count, True + self.sut_ports[slave], False, FRAME_SIZE_64, pkt_count, True ) pkt_now = self.get_all_stats(unbound_port, "tx", bond_port, **slaves) @@ -699,7 +699,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' pkt_orig = self.get_all_stats(unbound_port, "tx", bond_port, **slaves) for slave in slaves["active"]: temp_count = self.send_packet( - self.dut_ports[slave], + self.sut_ports[slave], False, pkt_size, pkt_count, @@ -709,7 +709,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' summary += temp_count for slave in 
slaves["inactive"]: self.send_packet( - self.dut_ports[slave], False, FRAME_SIZE_64, pkt_count, True + self.sut_ports[slave], False, FRAME_SIZE_64, pkt_count, True ) pkt_now = self.get_all_stats(unbound_port, "tx", bond_port, **slaves) @@ -720,7 +720,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' return pkt_now, summary def send_customized_packet_to_bond_port( - self, dut_unbound_port, dut_bond_port, tester_bond_port, pkt_count=100, **slaves + self, sut_unbound_port, sut_bond_port, tg_bond_port, pkt_count=100, **slaves ): pkt_orig = {} pkt_now = {} @@ -728,14 +728,14 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' summary = 0 # send to bond_port - pkt_orig = self.get_all_stats(dut_unbound_port, "tx", dut_bond_port, **slaves) + pkt_orig = self.get_all_stats(sut_unbound_port, "tx", sut_bond_port, **slaves) if len(slaves["active"]) != 0: invert_verify = False else: invert_verify = True - dest_mac = self.get_port_mac(dut_bond_port) + dest_mac = self.get_port_mac(sut_bond_port) ether_ip = {} ether = {} @@ -749,15 +749,15 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' for src_mac, src_ip, src_port in source: ether_ip["ether"]["src_mac"] = src_mac temp_count = self.send_packet( - dut_bond_port, - tester_bond_port, + sut_bond_port, + tg_bond_port, FRAME_SIZE_64, pkt_count, invert_verify, **ether_ip, ) summary += temp_count - pkt_now = self.get_all_stats(dut_unbound_port, "tx", dut_bond_port, **slaves) + pkt_now = self.get_all_stats(sut_unbound_port, "tx", sut_bond_port, **slaves) for key in pkt_now: for num in [0, 1, 2]: @@ -815,7 +815,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' # send to unbound_port pkt_orig = self.get_all_stats(unbound_port, "rx", bond_port, **slaves) - dest_mac = self.dut.get_mac_address(self.dut_ports[unbound_port]) + dest_mac = self.sut_node.get_mac_address(self.sut_ports[unbound_port]) dest_ip = 
"10.239.129.88" dest_port = 53 @@ -872,26 +872,26 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' self.ip_head_size = 20 self.udp_header_size = 8 - self.dut_ports = self.dut.get_ports() + self.sut_ports = self.sut_node.get_ports() - self.port_mask = utils.create_mask(self.dut_ports) + self.port_mask = utils.create_mask(self.sut_ports) - self.verify(len(self.dut_ports) >= 4, "Insufficient ports") + self.verify(len(self.sut_ports) >= 4, "Insufficient ports") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) - self.all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) + self.all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) - self.tester_bond = "bond0" + self.tg_bond = "bond0" - for port in self.dut_ports: - tester_port = self.tester.get_local_port(port) - intf = self.tester.get_interface(tester_port) - driver = self.tester.ports_info[tester_port]["port"].get_nic_driver() + for port in self.sut_ports: + tg_port = self.tg_node.get_local_port(port) + intf = self.tg_node.get_interface(tg_port) + driver = self.tg_node.ports_info[tg_port]["port"].get_nic_driver() if driver == "i40e": - self.tester.send_expect( + self.tg_node.send_expect( "ethtool --set-priv-flags %s link-down-on-close on" % intf, "# ", 10 ) @@ -912,34 +912,34 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' such as adding, removing, setting primary or setting mode. 
""" bond_port_0 = self.create_bonded_device(mode_set, SOCKET_0, True) - self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[1]) + self.add_slave_to_bonding_device(bond_port_0, False, self.sut_ports[1]) mode_value = self.get_bond_mode(bond_port_0) self.verify("%d" % mode_set in mode_value, "Setting bonding mode error") bond_port_1 = self.create_bonded_device(mode_set, SOCKET_0) - self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[0]) - self.add_slave_to_bonding_device(bond_port_1, True, self.dut_ports[0]) + self.add_slave_to_bonding_device(bond_port_0, False, self.sut_ports[0]) + self.add_slave_to_bonding_device(bond_port_1, True, self.sut_ports[0]) OTHER_MODE = mode_set + 1 if not mode_set else mode_set - 1 self.set_mode_for_bonding_device(bond_port_0, OTHER_MODE) self.set_mode_for_bonding_device(bond_port_0, mode_set) - self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[2]) + self.add_slave_to_bonding_device(bond_port_0, False, self.sut_ports[2]) time.sleep(5) - self.set_primary_for_bonding_device(bond_port_0, self.dut_ports[2]) + self.set_primary_for_bonding_device(bond_port_0, self.sut_ports[2]) - self.remove_slave_from_bonding_device(bond_port_0, False, self.dut_ports[2]) + self.remove_slave_from_bonding_device(bond_port_0, False, self.sut_ports[2]) primary_now = self.get_bond_primary(bond_port_0) self.verify( - int(primary_now) == self.dut_ports[1], + int(primary_now) == self.sut_ports[1], "Reset primary slave failed after removing primary slave", ) for bond_port in [bond_port_0, bond_port_1]: self.remove_all_slaves(bond_port) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.launch_app() def verify_bound_mac_opt(self, mode_set): @@ -947,13 +947,13 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' Create bonded device, add one slave, verify bonded device MAC action varies with the mode. 
""" - mac_address_0_orig = self.get_port_mac(self.dut_ports[0]) - mac_address_1_orig = self.get_port_mac(self.dut_ports[1]) - mac_address_2_orig = self.get_port_mac(self.dut_ports[2]) - mac_address_3_orig = self.get_port_mac(self.dut_ports[3]) + mac_address_0_orig = self.get_port_mac(self.sut_ports[0]) + mac_address_1_orig = self.get_port_mac(self.sut_ports[1]) + mac_address_2_orig = self.get_port_mac(self.sut_ports[2]) + mac_address_3_orig = self.get_port_mac(self.sut_ports[3]) bond_port = self.create_bonded_device(mode_set, SOCKET_1) - self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1]) + self.add_slave_to_bonding_device(bond_port, False, self.sut_ports[1]) mac_address_bond_orig = self.get_port_mac(bond_port) self.verify( @@ -961,8 +961,8 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' "Bonded device MAC address not same with first slave MAC", ) - self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2]) - mac_address_2_now = self.get_port_mac(self.dut_ports[2]) + self.add_slave_to_bonding_device(bond_port, False, self.sut_ports[2]) + mac_address_2_now = self.get_port_mac(self.sut_ports[2]) mac_address_bond_now = self.get_port_mac(bond_port) if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]: self.verify( @@ -981,8 +981,8 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' new_mac = "00:11:22:00:33:44" self.set_mac_for_bonding_device(bond_port, new_mac) self.start_port(bond_port) - mac_address_1_now = self.get_port_mac(self.dut_ports[1]) - mac_address_2_now = self.get_port_mac(self.dut_ports[2]) + mac_address_1_now = self.get_port_mac(self.sut_ports[1]) + mac_address_2_now = self.get_port_mac(self.sut_ports[2]) mac_address_bond_now = self.get_port_mac(bond_port) if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]: self.verify( @@ -1008,23 +1008,23 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' "Set 
mac failed for bonding device in mode %d" % mode_set, ) - self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False) - mac_address_1_now = self.get_port_mac(self.dut_ports[1]) - mac_address_2_now = self.get_port_mac(self.dut_ports[2]) + self.set_primary_for_bonding_device(bond_port, self.sut_ports[2], False) + mac_address_1_now = self.get_port_mac(self.sut_ports[1]) + mac_address_2_now = self.get_port_mac(self.sut_ports[2]) mac_address_bond_now = self.get_port_mac(bond_port) self.verify( mac_address_bond_now == new_mac, "Slave MAC changed when set primary slave" ) mac_address_1_orig = mac_address_1_now - self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2]) - mac_address_2_now = self.get_port_mac(self.dut_ports[2]) + self.remove_slave_from_bonding_device(bond_port, False, self.sut_ports[2]) + mac_address_2_now = self.get_port_mac(self.sut_ports[2]) self.verify( mac_address_2_now == mac_address_2_orig, "MAC not back to original after removing the port", ) - mac_address_1_now = self.get_port_mac(self.dut_ports[1]) + mac_address_1_now = self.get_port_mac(self.sut_ports[1]) mac_address_bond_now = self.get_port_mac(bond_port) self.verify( mac_address_bond_now == new_mac and mac_address_1_now == mac_address_1_orig, @@ -1032,7 +1032,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' ) self.remove_all_slaves(bond_port) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.launch_app() def verify_bound_promisc_opt(self, mode_set): @@ -1040,19 +1040,19 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' Set promiscuous mode on bonded device, verify bonded device and all slaves have different actions by the different modes. 
""" - unbound_port = self.dut_ports[3] + unbound_port = self.sut_ports[3] bond_port = self.create_bonded_device(mode_set, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( + self.sut_node.send_expect( "set portlist %d,%d" % (unbound_port, bond_port), "testpmd> " ) self.start_port(bond_port) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") port_disabled_num = 0 - testpmd_all_ports = self.dut_ports + testpmd_all_ports = self.sut_ports testpmd_all_ports.append(bond_port) for port_id in testpmd_all_ports: value = self.get_detail_from_port_info( @@ -1076,9 +1076,9 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' pkt_info = [ether_ip, send_param] slaves = {} - slaves["active"] = [self.dut_ports[0]] + slaves["active"] = [self.sut_ports[0]] slaves["inactive"] = [] - curr_primary = self.dut_ports[0] + curr_primary = self.sut_ports[0] pkt_now, summary = self.send_customized_packet_to_slave( unbound_port, bond_port, *pkt_info, **slaves @@ -1095,17 +1095,17 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' ) else: self.verify( - pkt_now[self.dut_ports[0]][0] == pkt_now[bond_port][0] + pkt_now[self.sut_ports[0]][0] == pkt_now[bond_port][0] and pkt_now[bond_port][0] == pkt_count, "Data not received by slave or bonding device when promiscuous enabled", ) - self.dut.send_expect("set promisc %s off" % bond_port, "testpmd> ") + self.sut_node.send_expect("set promisc %s off" % bond_port, "testpmd> ") port_disabled_num = 0 testpmd_all_ports = [ - self.dut_ports[0], - self.dut_ports[1], - self.dut_ports[2], + self.sut_ports[0], + self.sut_ports[1], + self.sut_ports[2], bond_port, ] for port_id in testpmd_all_ports: @@ -1157,7 +1157,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", 
count=%d)' ) pkt_now, summary = self.send_default_packet_to_slave( - self.dut_ports[3], bond_port, pkt_count, **slaves + self.sut_ports[3], bond_port, pkt_count, **slaves ) if mode_set == MODE_LACP: do_transmit = False @@ -1172,16 +1172,16 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' else: self.verify( pkt_now[curr_primary][0] == pkt_now[bond_port][0] - and pkt_now[self.dut_ports[3]][0] == pkt_now[bond_port][0] + and pkt_now[self.sut_ports[3]][0] == pkt_now[bond_port][0] and pkt_now[bond_port][0] == pkt_count, "RX or TX packet number not correct when promiscuous disabled", ) # Stop fwd threads first before removing slaves from bond to avoid # races and crashes - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") self.remove_all_slaves(bond_port) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.launch_app() def test_bound_basic_opt(self): @@ -1193,15 +1193,15 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' def test_bound_promisc_opt(self): self.verify_bound_promisc_opt(MODE_BROADCAST) - def admin_tester_port(self, local_port, status): + def admin_tg_port(self, local_port, status): """ Do some operations to the network interface port, such as "up" or "down". 
""" - if self.tester.get_os_type() == "freebsd": - self.tester.admin_ports(local_port, status) + if self.tg_node.get_os_type() == "freebsd": + self.tg_node.admin_ports(local_port, status) else: - eth = self.tester.get_interface(local_port) - self.tester.admin_ports_linux(eth, status) + eth = self.tg_node.get_interface(local_port) + self.tg_node.admin_ports_linux(eth, status) time.sleep(10) def verify_round_robin_rx(self, unbound_port, bond_port, **slaves): @@ -1266,19 +1266,19 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") slaves = {} - slaves["active"] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]] + slaves["active"] = [self.sut_ports[0], self.sut_ports[1], self.sut_ports[2]] slaves["inactive"] = [] - self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_round_robin_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_round_robin_tx(self.sut_ports[3], bond_port, **slaves) def test_round_robin_one_slave_down(self): """ @@ -1287,29 +1287,29 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], 
self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") + self.sut_node.send_expect("start", "testpmd> ") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") - stat = self.tester.get_port_status( - self.tester.get_local_port(self.dut_ports[0]) + stat = self.tg_node.get_port_status( + self.tg_node.get_local_port(self.sut_ports[0]) ) - self.dut.send_expect("show bonding config %d" % bond_port, "testpmd> ") - self.dut.send_expect("show port info all", "testpmd> ") + self.sut_node.send_expect("show bonding config %d" % bond_port, "testpmd> ") + self.sut_node.send_expect("show port info all", "testpmd> ") try: slaves = {} - slaves["active"] = [self.dut_ports[1], self.dut_ports[2]] - slaves["inactive"] = [self.dut_ports[0]] - self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves) + slaves["active"] = [self.sut_ports[1], self.sut_ports[2]] + slaves["inactive"] = [self.sut_ports[0]] + self.verify_round_robin_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_round_robin_tx(self.sut_ports[3], bond_port, **slaves) finally: - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") def test_round_robin_all_slaves_down(self): """ @@ -1318,32 +1318,32 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], 
self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "down") try: slaves = {} slaves["active"] = [] slaves["inactive"] = [ - self.dut_ports[0], - self.dut_ports[1], - self.dut_ports[2], + self.sut_ports[0], + self.sut_ports[1], + self.sut_ports[2], ] - self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_round_robin_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_round_robin_tx(self.sut_ports[3], bond_port, **slaves) finally: - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "up") def get_all_stats(self, unbound_port, rx_tx, bond_port, **slaves): """ @@ -1456,20 +1456,20 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0) 
self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") time.sleep(5) slaves = {} - slaves["active"] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]] + slaves["active"] = [self.sut_ports[0], self.sut_ports[1], self.sut_ports[2]] slaves["inactive"] = [] - self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_active_backup_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_active_backup_tx(self.sut_ports[3], bond_port, **slaves) def test_active_backup_change_primary(self): """ @@ -1478,21 +1478,21 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - self.set_primary_for_bonding_device(bond_port, self.dut_ports[1]) + self.sut_node.send_expect("start", "testpmd> ") + self.set_primary_for_bonding_device(bond_port, self.sut_ports[1]) time.sleep(5) slaves = {} - slaves["active"] = [self.dut_ports[1], self.dut_ports[0], self.dut_ports[2]] + slaves["active"] = [self.sut_ports[1], 
self.sut_ports[0], self.sut_ports[2]] slaves["inactive"] = [] - self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_active_backup_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_active_backup_tx(self.sut_ports[3], bond_port, **slaves) def test_active_backup_one_slave_down(self): """ @@ -1501,27 +1501,27 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") + self.sut_node.send_expect("start", "testpmd> ") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") primary_port = int(self.get_bond_primary(bond_port)) try: slaves = {} - active_slaves = [self.dut_ports[1], self.dut_ports[2]] + active_slaves = [self.sut_ports[1], self.sut_ports[2]] active_slaves.remove(primary_port) slaves["active"] = [primary_port] slaves["active"].extend(active_slaves) - slaves["inactive"] = [self.dut_ports[0]] - self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves) + slaves["inactive"] = [self.sut_ports[0]] + self.verify_active_backup_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_active_backup_tx(self.sut_ports[3], bond_port, **slaves) finally: - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") + 
self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") def test_active_backup_all_slaves_down(self): """ @@ -1530,31 +1530,31 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "down") + self.sut_node.send_expect("start", "testpmd> ") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "down") try: slaves = {} slaves["active"] = [] slaves["inactive"] = [ - self.dut_ports[0], - self.dut_ports[1], - self.dut_ports[2], + self.sut_ports[0], + self.sut_ports[1], + self.sut_ports[2], ] - self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_active_backup_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_active_backup_tx(self.sut_ports[3], bond_port, **slaves) finally: - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "up") + 
self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "up") def translate_mac_str_into_int(self, mac_str): """ @@ -1727,19 +1727,19 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") slaves = {} - slaves["active"] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]] + slaves["active"] = [self.sut_ports[0], self.sut_ports[1], self.sut_ports[2]] slaves["inactive"] = [] - self.verify_xor_tx(self.dut_ports[3], bond_port, "L2", False, **slaves) + self.verify_xor_tx(self.sut_ports[3], bond_port, "L2", False, **slaves) def test_xor_tx_one_slave_down(self): """ @@ -1748,23 +1748,23 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[2], self.dut_ports[1] + bond_port, False, self.sut_ports[0], self.sut_ports[2], self.sut_ports[1] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - 
self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") + self.sut_node.send_expect("start", "testpmd> ") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") try: slaves = {} - slaves["active"] = [self.dut_ports[1], self.dut_ports[2]] - slaves["inactive"] = [self.dut_ports[0]] + slaves["active"] = [self.sut_ports[1], self.sut_ports[2]] + slaves["inactive"] = [self.sut_ports[0]] - self.verify_xor_tx(self.dut_ports[3], bond_port, "L2", False, **slaves) + self.verify_xor_tx(self.sut_ports[3], bond_port, "L2", False, **slaves) finally: - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") def test_xor_tx_all_slaves_down(self): """ @@ -1773,41 +1773,41 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "down") + self.sut_node.send_expect("start", "testpmd> ") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "down") try: slaves = {} slaves["active"] = [] slaves["inactive"] = [ - 
self.dut_ports[0], - self.dut_ports[1], - self.dut_ports[2], + self.sut_ports[0], + self.sut_ports[1], + self.sut_ports[2], ] - self.verify_xor_tx(self.dut_ports[3], bond_port, "L2", False, **slaves) + self.verify_xor_tx(self.sut_ports[3], bond_port, "L2", False, **slaves) finally: - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "up") def vlan_strip_and_filter(self, action="off", *ports): """ Open or shutdown the vlan strip and filter option of specified port. """ for port_id in ports: - self.dut.send_expect( + self.sut_node.send_expect( "vlan set strip %s %d" % (action, port_id), "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "vlan set filter %s %d" % (action, port_id), "testpmd> " ) @@ -1818,29 +1818,29 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.set_balance_policy_for_bonding_device(bond_port, "l34") self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") slaves = {} - slaves["active"] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]] + slaves["active"] = [self.sut_ports[0], 
self.sut_ports[1], self.sut_ports[2]] slaves["inactive"] = [] - self.verify_xor_tx(self.dut_ports[3], bond_port, "L34", False, **slaves) + self.verify_xor_tx(self.sut_ports[3], bond_port, "L34", False, **slaves) self.vlan_strip_and_filter( "off", - self.dut_ports[0], - self.dut_ports[1], - self.dut_ports[2], - self.dut_ports[3], + self.sut_ports[0], + self.sut_ports[1], + self.sut_ports[2], + self.sut_ports[3], bond_port, ) - self.verify_xor_tx(self.dut_ports[3], bond_port, "L34", True, **slaves) + self.verify_xor_tx(self.sut_ports[3], bond_port, "L34", True, **slaves) def test_xor_rx(self): """ @@ -1848,19 +1848,19 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") slaves = {} - slaves["active"] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]] + slaves["active"] = [self.sut_ports[0], self.sut_ports[1], self.sut_ports[2]] slaves["inactive"] = [] - self.verify_xor_rx(self.dut_ports[3], bond_port, **slaves) + self.verify_xor_rx(self.sut_ports[3], bond_port, **slaves) def verify_broadcast_rx(self, unbound_port, bond_port, **slaves): """ @@ -1932,20 +1932,20 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], 
self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") slaves = {} - slaves["active"] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]] + slaves["active"] = [self.sut_ports[0], self.sut_ports[1], self.sut_ports[2]] slaves["inactive"] = [] - self.verify_broadcast_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_broadcast_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_broadcast_tx(self.sut_ports[3], bond_port, **slaves) def test_broadcast_tx_one_slave_down(self): """ @@ -1954,23 +1954,23 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") + self.sut_node.send_expect("start", "testpmd> ") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") try: slaves = {} - slaves["active"] = [self.dut_ports[1], self.dut_ports[2]] - slaves["inactive"] = [self.dut_ports[0]] + slaves["active"] = [self.sut_ports[1], self.sut_ports[2]] + slaves["inactive"] = [self.sut_ports[0]] - self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves) + 
self.verify_broadcast_tx(self.sut_ports[3], bond_port, **slaves) finally: - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") def test_broadcast_tx_all_slaves_down(self): """ @@ -1979,31 +1979,31 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' """ bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "down") + self.sut_node.send_expect("start", "testpmd> ") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "down") try: slaves = {} slaves["active"] = [] slaves["inactive"] = [ - self.dut_ports[0], - self.dut_ports[1], - self.dut_ports[2], + self.sut_ports[0], + self.sut_ports[1], + self.sut_ports[2], ] - self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_broadcast_tx(self.sut_ports[3], bond_port, **slaves) finally: - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "up") + 
self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "up") def verify_lacp_rx(self, unbound_port, bond_port, **slaves): """ @@ -2025,7 +2025,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' active_flag = 0 pkt_now, summary = self.send_customized_packet_to_bond_port( - unbound_port, bond_port, self.tester_bond, pkt_count, **slaves + unbound_port, bond_port, self.tg_bond, pkt_count, **slaves ) active_summary = 0 @@ -2040,7 +2040,7 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' pkt_now[slave][0] == 0, "Inactive slave have incorrect RX packet number in LACP", ) - self.dut.send_expect("show port info %d" % self.dut_ports[3], "testpmd> ") + self.sut_node.send_expect("show port info %d" % self.sut_ports[3], "testpmd> ") self.verify( pkt_now[unbound_port][0] == summary * active_flag, "Unbonded device has incorrect TX packet number in LACP", @@ -2090,48 +2090,48 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' "LACP load balance receive incorrectly on the unbound port", ) - def add_linux_bond_device(self, bond_mode, bond_name="bond0", *tester_local_ports): - if self.tester.get_os_type() == "linux": - self.tester.send_expect( + def add_linux_bond_device(self, bond_mode, bond_name="bond0", *tg_local_ports): + if self.tg_node.get_os_type() == "linux": + self.tg_node.send_expect( "modprobe bonding mode=%d miimon=100" % int(bond_mode), "# " ) - self.tester.send_expect("ifconfig %s up" % bond_name, "# ") + self.tg_node.send_expect("ifconfig %s up" % bond_name, "# ") - tester_bond_intfs = [ - self.tester.get_interface(port) for port in tester_local_ports + tg_bond_intfs = [ + self.tg_node.get_interface(port) for port in tg_local_ports ] - for intf in tester_bond_intfs: - self.tester.send_expect("ifenslave -f %s %s" 
% (bond_name, intf), "# ") + for intf in tg_bond_intfs: + self.tg_node.send_expect("ifenslave -f %s %s" % (bond_name, intf), "# ") if not self.slave_is_here_linux(bond_name, intf): self.verify(False, "Add linux bond device failed") - for port in tester_local_ports: - self.admin_tester_port(port, "up") + for port in tg_local_ports: + self.admin_tg_port(port, "up") else: self.verify( - False, "Not support to verify LACP on OS %s" % self.tester.get_os_type() + False, "Not support to verify LACP on OS %s" % self.tg_node.get_os_type() ) - def detach_linux_bond_device(self, bond_name="bond0", *tester_local_ports): - tester_bond_intf = [ - self.tester.get_interface(port) for port in tester_local_ports + def detach_linux_bond_device(self, bond_name="bond0", *tg_local_ports): + tg_bond_intf = [ + self.tg_node.get_interface(port) for port in tg_local_ports ] - if self.tester.get_os_type() == "linux": - for intf in tester_bond_intf: + if self.tg_node.get_os_type() == "linux": + for intf in tg_bond_intf: if self.slave_is_here_linux(bond_name, intf): - self.tester.send_expect( + self.tg_node.send_expect( "ifenslave -d %s %s" % (bond_name, intf), "# " ) if self.slave_is_here_linux(bond_name, intf): self.verify(False, "Delete linux bond device failed") - for port in tester_local_ports: - self.admin_tester_port(port, "up") + for port in tg_local_ports: + self.admin_tg_port(port, "up") else: self.verify( - False, "Not support to verify LACP on OS %s" % self.tester.get_os_type() + False, "Not support to verify LACP on OS %s" % self.tg_node.get_os_type() ) def slave_is_here_linux(self, bond_name="bond0", *interfaces): - out = self.tester.send_expect("cat /proc/net/bonding/%s" % bond_name, "# ") + out = self.tg_node.send_expect("cat /proc/net/bonding/%s" % bond_name, "# ") for intf in interfaces: if re.search(intf, out): return True @@ -2140,25 +2140,25 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' def setup_and_clear_lacp(func): """ - Setting lacp 
test environment on tester. + Setting lacp test environment on TG. """ @wraps(func) def test_env(*args, **kwargs): pmd_bond_instance = args[0] try: - dut_ports = [pmd_bond_instance.dut_ports[port] for port in [0, 1, 2]] - tester = pmd_bond_instance.tester - tester_local_ports = [tester.get_local_port(port) for port in dut_ports] + sut_ports = [pmd_bond_instance.sut_ports[port] for port in [0, 1, 2]] + tg_node = pmd_bond_instance.tg_node + tg_local_ports = [tg_node.get_local_port(port) for port in sut_ports] pmd_bond_instance.add_linux_bond_device( - MODE_LACP, pmd_bond_instance.tester_bond, *tester_local_ports + MODE_LACP, pmd_bond_instance.tg_bond, *tg_local_ports ) func(*args, **kwargs) finally: pmd_bond_instance.detach_linux_bond_device( - pmd_bond_instance.tester_bond, *tester_local_ports + pmd_bond_instance.tg_bond, *tg_local_ports ) return test_env @@ -2170,14 +2170,14 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' def clear_env(*args, **kwargs): pmd_bond_instance = args[0] try: - dut_ports = [pmd_bond_instance.dut_ports[port] for port in [0, 1, 2]] - tester = pmd_bond_instance.tester - tester_local_ports = [tester.get_local_port(port) for port in dut_ports] + sut_ports = [pmd_bond_instance.sut_ports[port] for port in [0, 1, 2]] + tg_node = pmd_bond_instance.tg_node + tg_local_ports = [tg_node.get_local_port(port) for port in sut_ports] func(*args, **kwargs) finally: pmd_bond_instance.detach_linux_bond_device( - pmd_bond_instance.tester_bond, *tester_local_ports + pmd_bond_instance.tg_bond, *tg_local_ports ) return clear_env @@ -2272,78 +2272,78 @@ UDP(sport=srcport, dport=destport)/Raw(load="\x50"*%s)], iface="%s", count=%d)' def test_tlb_rx_tx(self): bond_port = self.create_bonded_device(MODE_TLB_BALANCE, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - 
self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") slaves = {} - slaves["active"] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]] + slaves["active"] = [self.sut_ports[0], self.sut_ports[1], self.sut_ports[2]] slaves["inactive"] = [] - self.verify_tlb_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_tlb_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_tlb_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_tlb_tx(self.sut_ports[3], bond_port, **slaves) def test_tlb_one_slave_dwon(self): bond_port = self.create_bonded_device(MODE_TLB_BALANCE, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") + self.sut_node.send_expect("start", "testpmd> ") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") try: slaves = {} - slaves["active"] = [self.dut_ports[1], self.dut_ports[2]] - slaves["inactive"] = [self.dut_ports[0]] + slaves["active"] = [self.sut_ports[1], self.sut_ports[2]] + slaves["inactive"] = [self.sut_ports[0]] - self.verify_tlb_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_tlb_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_tlb_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_tlb_tx(self.sut_ports[3], bond_port, **slaves) finally: - 
self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") def test_tlb_all_slaves_down(self): bond_port = self.create_bonded_device(MODE_TLB_BALANCE, SOCKET_0) self.add_slave_to_bonding_device( - bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2] + bond_port, False, self.sut_ports[0], self.sut_ports[1], self.sut_ports[2] ) - self.dut.send_expect( - "set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> " + self.sut_node.send_expect( + "set portlist %d,%d" % (self.sut_ports[3], bond_port), "testpmd> " ) self.start_all_ports() - self.dut.send_expect("start", "testpmd> ") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "down") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "down") + self.sut_node.send_expect("start", "testpmd> ") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "down") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "down") try: slaves = {} slaves["active"] = [] slaves["inactive"] = [ - self.dut_ports[0], - self.dut_ports[1], - self.dut_ports[2], + self.sut_ports[0], + self.sut_ports[1], + self.sut_ports[2], ] - self.verify_tlb_rx(self.dut_ports[3], bond_port, **slaves) - self.verify_tlb_tx(self.dut_ports[3], bond_port, **slaves) + self.verify_tlb_rx(self.sut_ports[3], bond_port, **slaves) + self.verify_tlb_tx(self.sut_ports[3], bond_port, **slaves) finally: - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "up") - self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[0]), "up") + 
self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[1]), "up") + self.admin_tg_port(self.tg_node.get_local_port(self.sut_ports[2]), "up") def tear_down(self): """ Run after each test case. """ - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def tear_down_all(self): """ diff --git a/tests/TestSuite_pmd_bonded_8023ad.py b/tests/TestSuite_pmd_bonded_8023ad.py index a9577dff..6540ede2 100644 --- a/tests/TestSuite_pmd_bonded_8023ad.py +++ b/tests/TestSuite_pmd_bonded_8023ad.py @@ -24,7 +24,7 @@ class TestBonding8023AD(TestCase): DEDICATED_QUEUES = ["disable", "enable"] # - # On dut, dpdk bonding + # On SUT, dpdk bonding # def set_8023ad_agg_mode(self, bond_port, mode="bandwidth"): @@ -85,7 +85,7 @@ class TestBonding8023AD(TestCase): def set_8023ad_bonded(self, slaves, bond_mode, ignore=True): """set 802.3ad bonded mode for the specified bonding mode""" - specified_socket = self.dut.get_numa_id(slaves[0]) + specified_socket = self.sut_node.get_numa_id(slaves[0]) # create bonded device, add slaves in it bond_port = self.bond_inst.create_bonded_device(bond_mode, specified_socket) if not ignore: @@ -99,7 +99,7 @@ class TestBonding8023AD(TestCase): def set_8023ad_bonded2(self, slaves, bond_mode, ignore=True): """set 802.3ad bonded mode for the specified bonding mode""" - specified_socket = self.dut.get_numa_id(slaves[0]) + specified_socket = self.sut_node.get_numa_id(slaves[0]) # create bonded device, add slaves in it bond_port = self.bond_inst.create_bonded_device(bond_mode, specified_socket) if not ignore: @@ -113,7 +113,7 @@ class TestBonding8023AD(TestCase): """get slaves ports pci address""" slaves_pci = [] for port_id in slaves: - slaves_pci.append(self.dut.ports_info[port_id]["pci"]) + slaves_pci.append(self.sut_node.ports_info[port_id]["pci"]) if not slaves_pci: msg = "can't find tx_port pci" self.logger.error(msg) @@ -126,7 +126,7 @@ class TestBonding8023AD(TestCase): # for port link up is slow and unstable, # every port 
should start one by one cmds = [] - port_num = len(self.dut_ports) + port_num = len(self.sut_ports) start_fmt = "port start {0}".format for cnt in range(port_num): cmds.append([start_fmt(cnt), "", 5]) @@ -354,7 +354,7 @@ class TestBonding8023AD(TestCase): raise VerifyFailure("check_8023ad_dedicated_queues is failed") def get_commandline_options(self, agg_mode): - slaves = self.dut_ports + slaves = self.sut_ports # get bonding port configuration slave_pcis = self.get_pci_link(slaves) # create commandline option format @@ -368,7 +368,7 @@ class TestBonding8023AD(TestCase): option = vdev_format.format(agg_mode) vdev_option = " --vdev '{0}'".format(option) # 802.3ad bond port only create one, it must be the max port number - bond_port = len(self.dut_ports) + bond_port = len(self.sut_ports) return bond_port, vdev_option def run_test_pre(self, agg_mode): @@ -385,7 +385,7 @@ class TestBonding8023AD(TestCase): msg = fmt.format(agg_mode, cur_agg_mode) self.logger.warning(msg) # get forwarding port - for port_id in range(len(self.dut_ports)): + for port_id in range(len(self.sut_ports)): # select a non-slave port as forwarding port to do transmitting if str(port_id) not in cur_slaves: tx_port_id = port_id @@ -401,7 +401,7 @@ class TestBonding8023AD(TestCase): def run_dpdk_functional_pre(self): mode = MODE_LACP - slaves = self.dut_ports[:] + slaves = self.sut_ports[:] self.bond_inst.start_testpmd() bond_port = self.run_8023ad_pre(slaves, mode) return slaves, bond_port @@ -493,13 +493,13 @@ class TestBonding8023AD(TestCase): self.verify("bsdapp" not in self.target, "Bonding not support freebsd") # ------------------------------------------------------------ # link peer resource - self.dut_ports = self.dut.get_ports() + self.sut_ports = self.sut_node.get_ports() required_link = 2 - self.verify(len(self.dut_ports) >= required_link, "Insufficient ports") + self.verify(len(self.sut_ports) >= required_link, "Insufficient ports") # 
------------------------------------------------------------ # 802.3ad related self.bond_port = None - self.bond_slave = self.dut_ports[0] + self.bond_slave = self.sut_ports[0] # ---------------------------------------------------------------- # initialize bonding common methods name config = { @@ -611,9 +611,9 @@ class TestBonding8023AD(TestCase): def test_basic_behav_agg_mode(self): """test 802.3ad aggregator mode setting""" mode = MODE_LACP - self.check_8023ad_agg_modes(self.dut_ports, mode) + self.check_8023ad_agg_modes(self.sut_ports, mode) def test_basic_dedicated_queues(self): """test 802.3ad dedicated queues setting""" mode = MODE_LACP - self.check_8023ad_dedicated_queues(self.dut_ports, mode) + self.check_8023ad_dedicated_queues(self.sut_ports, mode) diff --git a/tests/TestSuite_pmd_stacked_bonded.py b/tests/TestSuite_pmd_stacked_bonded.py index 2e47fecc..083d4400 100644 --- a/tests/TestSuite_pmd_stacked_bonded.py +++ b/tests/TestSuite_pmd_stacked_bonded.py @@ -28,7 +28,7 @@ from .bonding import ( class TestBondingStacked(TestCase): # - # On dut, dpdk bonding + # On SUT, dpdk bonding # def check_bonded_device_queue_config(self, *devices): """ @@ -52,7 +52,7 @@ class TestBondingStacked(TestCase): set stacked bonded mode for a custom bonding mode """ inst = self.bond_inst - socket_id = self.dut.get_numa_id(self.bond_slave) + socket_id = self.sut_node.get_numa_id(self.bond_slave) # create first bonded device 1, add slaves in it bond_port_1 = inst.create_bonded_device(bond_mode, socket_id) inst.add_slave(bond_port_1, False, "", *slaveGrpOne) @@ -81,7 +81,7 @@ class TestBondingStacked(TestCase): more than 2 """ inst = self.bond_inst - socket_id = self.dut.get_numa_id(self.bond_slave) + socket_id = self.sut_node.get_numa_id(self.bond_slave) third_bond_port = inst.create_bonded_device(bond_mode, socket_id) inst.add_slave(third_bond_port, False, "", *[bond_port]) @@ -158,8 +158,8 @@ class TestBondingStacked(TestCase): def active_slave_rx(self, slave, bond_port, 
mode): msg = "send packet to active slave port <{0}>".format(slave) self.logger.info(msg) - tx_intf = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[slave]) + tx_intf = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[slave]) ) # get traffic config traffic_config = self.config_port_traffic(tx_intf, slave, self.total_pkt) @@ -171,7 +171,7 @@ class TestBondingStacked(TestCase): msg = "port <{0}> Data not received by port <{1}>".format(tx_intf, slave) # here using `>=` to ignore some miscellaneous packets, e.g. lldp self.verify(stats[slave]["RX-packets"] >= self.total_pkt, msg) - msg = "tester port {0} <----> dut port {1} is ok".format(tx_intf, slave) + msg = "TG port {0} <----> SUT port {1} is ok".format(tx_intf, slave) self.logger.info(msg) # check bond port statistics # here using `>=` to ignore some miscellaneous packets, e.g. lldp @@ -183,8 +183,8 @@ class TestBondingStacked(TestCase): def inactive_slave_rx(self, slave, bond_port, mode): msg = "send packet to inactive slave port <{0}>".format(slave) self.logger.info(msg) - tx_intf = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[slave]) + tx_intf = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[slave]) ) # get traffic config traffic_config = self.config_port_traffic(tx_intf, slave, self.total_pkt) @@ -197,7 +197,7 @@ class TestBondingStacked(TestCase): tx_intf, slave ) self.verify(stats[slave]["RX-packets"] == 0, msg) - msg = "tester port {0} <-| |-> dut port {1} is blocked".format(tx_intf, slave) + msg = "TG port {0} <-| |-> SUT port {1} is blocked".format(tx_intf, slave) self.logger.info(msg) # check bond port statistics self.verify( @@ -226,7 +226,7 @@ class TestBondingStacked(TestCase): # --------------------------------------------------- # set one slave of first bonded device link down primary_slave = slaveGrpOne[0] - self.bond_inst.set_dut_port_status(primary_slave, "down") + 
self.bond_inst.set_sut_port_status(primary_slave, "down") slaves["inactive"].append(primary_slave) # get slave status primary_port, active_slaves = self.bond_inst.get_active_slaves(bond_port_1) @@ -237,7 +237,7 @@ class TestBondingStacked(TestCase): # --------------------------------------------------- # set one slave of second bonded device link down primary_slave = slaveGrpTwo[0] - self.bond_inst.set_dut_port_status(primary_slave, "down") + self.bond_inst.set_sut_port_status(primary_slave, "down") slaves["inactive"].append(primary_slave) # check active slaves primary_port_2, active_slaves_2 = self.bond_inst.get_active_slaves( @@ -358,14 +358,14 @@ class TestBondingStacked(TestCase): Run before each test suite """ self.verify("bsdapp" not in self.target, "Bonding not support freebsd") - self.dut_ports = self.dut.get_ports() - num_ports = len(self.dut_ports) + self.sut_ports = self.sut_node.get_ports() + num_ports = len(self.sut_ports) self.verify(num_ports == 2 or num_ports == 4, "Insufficient ports") # separate ports into two group as first level bond ports' slaves - sep_index = len(self.dut_ports) // 2 - self.slaveGrpOne = self.dut_ports[:sep_index] - self.slaveGrpTwo = self.dut_ports[sep_index:] - self.bond_slave = self.dut_ports[0] + sep_index = len(self.sut_ports) // 2 + self.slaveGrpOne = self.sut_ports[:sep_index] + self.slaveGrpTwo = self.sut_ports[sep_index:] + self.bond_slave = self.sut_ports[0] # initialize bonding common methods name self.total_pkt = 100 config = { @@ -493,7 +493,7 @@ class TestBondingStacked(TestCase): when bringing any one slave of the bonding device link down. """ slave_down_port_limit = 4 - if len(self.dut_ports) < slave_down_port_limit: + if len(self.sut_ports) < slave_down_port_limit: msg = ( "ports less than {0}, " "ignore stacked one slave down check" ).format(slave_down_port_limit) @@ -513,7 +513,7 @@ class TestBondingStacked(TestCase): bringing any one slave of the bonding device link down. 
""" slave_down_port_limit = 4 - if len(self.dut_ports) < slave_down_port_limit: + if len(self.sut_ports) < slave_down_port_limit: msg = ( "ports less than {0}, " "ignore stacked one slave down check" ).format(slave_down_port_limit) diff --git a/tests/TestSuite_pmdpcap.py b/tests/TestSuite_pmdpcap.py index a0bf3993..13422971 100644 --- a/tests/TestSuite_pmdpcap.py +++ b/tests/TestSuite_pmdpcap.py @@ -20,21 +20,21 @@ from framework.test_case import TestCase class TestPmdPcap(TestCase): pcap_file_sizes = [1000, 500] - dut_pcap_files_path = "/root/" + sut_pcap_files_path = "/root/" def set_up_all(self): - self.check_scapy_in_dut() + self.check_scapy_in_sut() - self.memory_channel = self.dut.get_memory_channels() + self.memory_channel = self.sut_node.get_memory_channels() # make sure there is no interface to bind # because if there is any interface bonded to igb_uio, # it will result in packet transmitting failed - self.dut.restore_interfaces() - os_type = self.dut.get_os_type() + self.sut_node.restore_interfaces() + os_type = self.sut_node.get_os_type() if os_type == "freebsd": - self.dut.send_expect("kldload contigmem", "#", 20) - self.path = self.dut.apps_name["test-pmd"] + self.sut_node.send_expect("kldload contigmem", "#", 20) + self.path = self.sut_node.apps_name["test-pmd"] def create_pcap_file(self, filename, number_of_packets): flow = [] @@ -48,30 +48,30 @@ class TestPmdPcap(TestCase): wrpcap(filename, flow) - def check_scapy_in_dut(self): + def check_scapy_in_sut(self): try: - self.dut.send_expect("scapy", ">>> ") - self.dut.send_expect("quit()", "# ") + self.sut_node.send_expect("scapy", ">>> ") + self.sut_node.send_expect("quit()", "# ") except: - self.verify(False, "Scapy is required in dut.") + self.verify(False, "Scapy is required in SUT.") def check_pcap_files(self, in_pcap, out_pcap, expected_frames): # Check if the number of expected frames are in the output - result = self.dut.send_expect("tcpdump -n -e -r %s | wc -l" % out_pcap, "# ") + result = 
self.sut_node.send_expect("tcpdump -n -e -r %s | wc -l" % out_pcap, "# ") self.verify( str(expected_frames) in result, "Not all packets have been forwarded" ) # Check if the frames in the input and output files match - self.dut.send_expect("scapy", ">>> ") - self.dut.send_expect('input=rdpcap("%s")' % in_pcap, ">>> ") - self.dut.send_expect('output=rdpcap("%s")' % out_pcap, ">>> ") - self.dut.send_expect( + self.sut_node.send_expect("scapy", ">>> ") + self.sut_node.send_expect('input=rdpcap("%s")' % in_pcap, ">>> ") + self.sut_node.send_expect('output=rdpcap("%s")' % out_pcap, ">>> ") + self.sut_node.send_expect( "result=[input[i]==output[i] for i in range(len(input))]", ">>> " ) - result = self.dut.send_expect("False in result", ">>> ") - self.dut.send_expect("quit()", "# ") + result = self.sut_node.send_expect("False in result", ">>> ") + self.sut_node.send_expect("quit()", "# ") self.verify("True" not in result, "In/Out packets do not match.") @@ -79,12 +79,12 @@ class TestPmdPcap(TestCase): in_pcap = "in_pmdpcap.pcap" out_pcap = "/tmp/out_pmdpcap.pcap" - two_cores = self.dut.get_core_list("1S/2C/1T") + two_cores = self.sut_node.get_core_list("1S/2C/1T") core_mask = utils.create_mask(two_cores) - eal_para = self.dut.create_eal_parameters(cores="1S/2C/1T") + eal_para = self.sut_node.create_eal_parameters(cores="1S/2C/1T") self.create_pcap_file(in_pcap, TestPmdPcap.pcap_file_sizes[0]) - self.dut.session.copy_file_to(in_pcap) + self.sut_node.session.copy_file_to(in_pcap) command = ( "{} {} " @@ -92,21 +92,21 @@ class TestPmdPcap(TestCase): + "-- -i --port-topology=chained --no-flush-rx" ) - self.dut.send_expect( + self.sut_node.send_expect( command.format( - self.path, eal_para, TestPmdPcap.dut_pcap_files_path + in_pcap, out_pcap + self.path, eal_para, TestPmdPcap.sut_pcap_files_path + in_pcap, out_pcap ), "testpmd> ", 15, ) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") sleep(2) - self.dut.send_expect("stop", 
"testpmd> ") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ") self.check_pcap_files( - TestPmdPcap.dut_pcap_files_path + in_pcap, + TestPmdPcap.sut_pcap_files_path + in_pcap, out_pcap, TestPmdPcap.pcap_file_sizes[0], ) @@ -119,14 +119,14 @@ class TestPmdPcap(TestCase): in_pcap2 = "in2_pmdpcap.pcap" out_pcap2 = "/tmp/out2_pmdpcap.pcap" - four_cores = self.dut.get_core_list("1S/4C/1T") + four_cores = self.sut_node.get_core_list("1S/4C/1T") core_mask = utils.create_mask(four_cores) - eal_para = self.dut.create_eal_parameters(cores="1S/4C/1T") + eal_para = self.sut_node.create_eal_parameters(cores="1S/4C/1T") self.create_pcap_file(in_pcap1, TestPmdPcap.pcap_file_sizes[0]) - self.dut.session.copy_file_to(in_pcap1) + self.sut_node.session.copy_file_to(in_pcap1) self.create_pcap_file(in_pcap2, TestPmdPcap.pcap_file_sizes[1]) - self.dut.session.copy_file_to(in_pcap2) + self.sut_node.session.copy_file_to(in_pcap2) command = ( "{} {} " @@ -135,35 +135,35 @@ class TestPmdPcap(TestCase): + "-- -i --no-flush-rx" ) - self.dut.send_expect( + self.sut_node.send_expect( command.format( self.path, eal_para, - TestPmdPcap.dut_pcap_files_path + in_pcap1, + TestPmdPcap.sut_pcap_files_path + in_pcap1, out_pcap1, - TestPmdPcap.dut_pcap_files_path + in_pcap2, + TestPmdPcap.sut_pcap_files_path + in_pcap2, out_pcap2, ), "testpmd> ", 15, ) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") sleep(2) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ") self.check_pcap_files( - TestPmdPcap.dut_pcap_files_path + in_pcap1, + TestPmdPcap.sut_pcap_files_path + in_pcap1, out_pcap2, TestPmdPcap.pcap_file_sizes[0], ) self.check_pcap_files( - TestPmdPcap.dut_pcap_files_path + in_pcap2, + TestPmdPcap.sut_pcap_files_path + in_pcap2, out_pcap1, 
TestPmdPcap.pcap_file_sizes[1], ) def tear_down_all(self): - self.dut.set_target(self.target) + self.sut_node.set_target(self.target) diff --git a/tests/TestSuite_pmdrss_hash.py b/tests/TestSuite_pmdrss_hash.py index ff8d042f..5cd3f7f9 100644 --- a/tests/TestSuite_pmdrss_hash.py +++ b/tests/TestSuite_pmdrss_hash.py @@ -53,9 +53,9 @@ class TestPmdrssHash(TestCase): Sends packets. """ received_pkts = [] - self.tester.scapy_foreground() - self.dut.send_expect("start", "testpmd>") - mac = self.dut.get_mac_address(0) + self.tg_node.scapy_foreground() + self.sut_node.send_expect("start", "testpmd>") + mac = self.sut_node.get_mac_address(0) # send packet with different source and dest ip if tran_type == "ipv4-other": @@ -64,8 +64,8 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src="192.168.0.%d", dst="192.168.0.%d")], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-tcp": for i in range(10): @@ -73,8 +73,8 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src="192.168.0.%d", dst="192.168.0.%d")/TCP(sport=1024,dport=1024)], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-udp": for i in range(10): @@ -82,8 +82,8 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src="192.168.0.%d", dst="192.168.0.%d")/UDP(sport=1024,dport=1024)], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-sctp": for i in range(10): @@ -91,8 +91,8 @@ class TestPmdrssHash(TestCase): 
r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src="192.168.0.%d", dst="192.168.0.%d")/SCTP(sport=1024,dport=1024,tag=1)], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-frag": for i in range(10): @@ -100,8 +100,8 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IP(src="192.168.0.%d", dst="192.168.0.%d",frag=1,flags="MF")], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "l2_payload": for i in range(10): @@ -109,8 +109,8 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(src="00:00:00:00:00:0%d",dst="%s")], iface="%s")' % (i + 1, mac, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-other": @@ -119,8 +119,8 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-tcp": for i in range(10): @@ -128,8 +128,8 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")/TCP(sport=1024,dport=1024)], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-udp": for i in range(10): @@ -137,8 +137,8 @@ class 
TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")/UDP(sport=1024,dport=1024)], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-sctp": for i in range(10): @@ -146,8 +146,8 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d", nh=132)/SCTP(sport=1024,dport=1024,tag=1)], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-frag": for i in range(10): @@ -155,14 +155,14 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s", src=get_if_hwaddr("%s"))/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d",nh=44)/IPv6ExtHdrFragment()], iface="%s")' % (mac, itf, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) else: print("\ntran_type error!\n") - out = self.dut.get_session_output(timeout=1) - self.dut.send_expect("stop", "testpmd>") + out = self.sut_node.get_session_output(timeout=1) + self.sut_node.send_expect("stop", "testpmd>") lines = out.split("\r\n") reta_line = {} # collect the hash result and the queue id @@ -241,9 +241,9 @@ class TestPmdrssHash(TestCase): Sends packets. 
""" received_pkts = [] - self.tester.scapy_foreground() - self.dut.send_expect("start", "testpmd>") - mac = self.dut.get_mac_address(0) + self.tg_node.scapy_foreground() + self.sut_node.send_expect("start", "testpmd>") + mac = self.sut_node.get_mac_address(0) # send packet with different source and dest ip if tran_type == "ipv4-other": @@ -252,13 +252,13 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-tcp": @@ -267,13 +267,13 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/TCP(sport=1024,dport=1025)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/TCP(sport=1025,dport=1024)], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-udp": for i in range(4): @@ -281,13 +281,13 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/UDP(sport=1024,dport=1025)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/UDP(sport=1025,dport=1024)], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + 
self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-sctp": for i in range(4): @@ -295,13 +295,13 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/SCTP(sport=1024,dport=1025,tag=1)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/SCTP(sport=1025,dport=1024,tag=1)], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-frag": for i in range(10): @@ -309,13 +309,13 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d",frag=1,flags="MF")], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d",frag=1,flags="MF")], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "l2_payload": for i in range(10): @@ -323,8 +323,8 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(src="00:00:00:00:00:%02d",dst="%s")], iface="%s")' % (i + 1, mac, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-other": for i in range(4): @@ -332,13 +332,13 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( 
r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:3::%d", dst="3ffe:2501:200:1fff::%d")], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-tcp": @@ -347,13 +347,13 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")/TCP(sport=1024,dport=1025)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:3::%d", dst="3ffe:2501:200:1fff::%d")/TCP(sport=1025,dport=1024)], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-udp": @@ -362,13 +362,13 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")/UDP(sport=1024,dport=1025)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")/UDP(sport=1025,dport=1024)], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-sctp": for i in range(4): @@ -376,13 +376,13 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d", nh=132)/SCTP(sport=1024,dport=1025,tag=1)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d", 
nh=132)/SCTP(sport=1025,dport=1024,tag=1)], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-frag": for i in range(4): @@ -390,19 +390,19 @@ class TestPmdrssHash(TestCase): r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d",nh=44)/IPv6ExtHdrFragment()], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet2 = ( r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d",nh=44)/IPv6ExtHdrFragment()], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) else: print("\ntran_type error!\n") - out = self.dut.get_session_output(timeout=1) - self.dut.send_expect("stop", "testpmd>") + out = self.sut_node.get_session_output(timeout=1) + self.sut_node.send_expect("stop", "testpmd>") lines = out.split("\r\n") # collect the hash result of five tuple and the queue id @@ -510,106 +510,106 @@ class TestPmdrssHash(TestCase): reta_num = 512 else: self.verify(False, "NIC Unsupported:%s" % str(self.nic)) - ports = self.dut.get_ports(self.nic) + ports = self.sut_node.get_ports(self.nic) self.verify(len(ports) >= 1, "Not enough ports available") - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ Run before each test case. 
""" - cores = self.dut.get_core_list("all") - self.eal_para = self.dut.create_eal_parameters(cores=cores) + cores = self.sut_node.get_core_list("all") + self.eal_para = self.sut_node.create_eal_parameters(cores=cores) self.coremask = utils.create_mask(cores) def test_toeplitz(self): """ Test Case: test_toeplitz """ - dutPorts = self.dut.get_ports(self.nic) - localPort = self.tester.get_local_port(dutPorts[0]) - itf = self.tester.get_interface(localPort) + sutPorts = self.sut_node.get_ports(self.nic) + localPort = self.tg_node.get_local_port(sutPorts[0]) + itf = self.tg_node.get_interface(localPort) rule_action = "func toeplitz queues end / end" global reta_num global iptypes - self.dut.kill_all() + self.sut_node.kill_all() # test with different rss queues - self.dut.send_expect( + self.sut_node.send_expect( "%s %s -- -i --rxq=%d --txq=%d" % (self.path, self.eal_para, queue, queue), "testpmd> ", 120, ) for iptype, rsstype in list(iptypes.items()): - self.dut.send_expect("set verbose 8", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") + self.sut_node.send_expect("set verbose 8", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") rule_cmd = f"flow create 0 ingress pattern eth / ipv4 / end actions rss types {iptype} end queues end {rule_action}" if "sctp" in iptype or "udp" in iptype or "tcp" in iptype: rule_cmd = rule_cmd.replace("/ ipv4 /", f"/ ipv4 / {rsstype} /") if "ipv6" in iptype: rule_cmd = rule_cmd.replace("ipv4", "ipv6") - outx = 
self.dut.send_expect(rule_cmd, "testpmd> ") + outx = self.sut_node.send_expect(rule_cmd, "testpmd> ") self.verify("created" in outx, "Create flow failed") - self.dut.send_expect("port start all", "testpmd> ") - out = self.dut.send_expect("port config all rss %s" % rsstype, "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + out = self.sut_node.send_expect("port config all rss %s" % rsstype, "testpmd> ") self.verify( "error" not in out, "Configuration of RSS hash failed: Invalid argument" ) # configure the reta with specific mappings. for i in range(reta_num): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> " ) self.send_packet(itf, iptype) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def test_toeplitz_symmetric(self): """ Test Case: test_toeplitz_symmetric """ - dutPorts = self.dut.get_ports(self.nic) - localPort = self.tester.get_local_port(dutPorts[0]) - itf = self.tester.get_interface(localPort) + sutPorts = self.sut_node.get_ports(self.nic) + localPort = self.tg_node.get_local_port(sutPorts[0]) + itf = self.tg_node.get_interface(localPort) rule_action = "func symmetric_toeplitz queues end / end" global reta_num global iptypes - self.dut.kill_all() + self.sut_node.kill_all() # test with different rss queues - self.dut.send_expect( + self.sut_node.send_expect( "%s %s -- -i --rxq=%d --txq=%d" % (self.path, self.eal_para, queue, queue), "testpmd> ", 120, ) for iptype, rsstype in list(iptypes.items()): - self.dut.send_expect("set verbose 8", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") + self.sut_node.send_expect("set verbose 8", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set 
promisc all off", "testpmd> ") + self.sut_node.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") rule_cmd = f"flow create 0 ingress pattern eth / ipv4 / end actions rss types {iptype} end queues end {rule_action}" if "sctp" in iptype or "udp" in iptype or "tcp" in iptype: rule_cmd = rule_cmd.replace("/ ipv4 /", f"/ ipv4 / {rsstype} /") if "ipv6" in iptype: rule_cmd = rule_cmd.replace("ipv4", "ipv6") - outx = self.dut.send_expect(rule_cmd, "testpmd> ") + outx = self.sut_node.send_expect(rule_cmd, "testpmd> ") self.verify("created" in outx, "Create flow failed") - self.dut.send_expect("port start all", "testpmd> ") - out = self.dut.send_expect("port config all rss %s" % rsstype, "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + out = self.sut_node.send_expect("port config all rss %s" % rsstype, "testpmd> ") self.verify( "error" not in out, "Configuration of RSS hash failed: Invalid argument" ) @@ -617,30 +617,30 @@ class TestPmdrssHash(TestCase): # configure the reta with specific mappings. 
for i in range(reta_num): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> " ) self.send_packet_symmetric(itf, iptype) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def test_simple(self): """ Test Case: test_simple """ - dutPorts = self.dut.get_ports(self.nic) - localPort = self.tester.get_local_port(dutPorts[0]) - itf = self.tester.get_interface(localPort) + sutPorts = self.sut_node.get_ports(self.nic) + localPort = self.tg_node.get_local_port(sutPorts[0]) + itf = self.tg_node.get_interface(localPort) rule_action = "func simple_xor queues end / end" global reta_num global iptypes - self.dut.kill_all() + self.sut_node.kill_all() # test with different rss queues - self.dut.send_expect( + self.sut_node.send_expect( "%s %s -- -i --rxq=%d --txq=%d" % (self.path, self.eal_para, queue, queue), "testpmd> ", 120, @@ -651,78 +651,78 @@ class TestPmdrssHash(TestCase): "***********************%s rss test********************************" % iptype ) - self.dut.send_expect("set verbose 8", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") + self.sut_node.send_expect("set verbose 8", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") # some nic not support change hash algorithm - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") rule_cmd = f"flow 
create 0 ingress pattern eth / ipv4 / end actions rss types {iptype} end queues end {rule_action}" if "sctp" in iptype or "udp" in iptype or "tcp" in iptype: rule_cmd = rule_cmd.replace("/ ipv4 /", f"/ ipv4 / {rsstype} /") if "ipv6" in iptype: rule_cmd = rule_cmd.replace("ipv4", "ipv6") - outx = self.dut.send_expect(rule_cmd, "testpmd> ") + outx = self.sut_node.send_expect(rule_cmd, "testpmd> ") self.verify("created" in outx, "Create flow failed") - self.dut.send_expect("port start all", "testpmd> ") - out = self.dut.send_expect("port config all rss %s" % rsstype, "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + out = self.sut_node.send_expect("port config all rss %s" % rsstype, "testpmd> ") self.verify( "error" not in out, "Configuration of RSS hash failed: Invalid argument" ) # configure the reta with specific mappings. for i in range(reta_num): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> " ) self.send_packet(itf, iptype) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def test_simple_symmetric(self): - dutPorts = self.dut.get_ports(self.nic) - localPort = self.tester.get_local_port(dutPorts[0]) - itf = self.tester.get_interface(localPort) + sutPorts = self.sut_node.get_ports(self.nic) + localPort = self.tg_node.get_local_port(sutPorts[0]) + itf = self.tg_node.get_interface(localPort) global reta_num global iptypes - self.dut.kill_all() + self.sut_node.kill_all() # test with different rss queues - self.dut.send_expect( + self.sut_node.send_expect( "%s %s -- -i --rxq=%d --txq=%d" % (self.path, self.eal_para, queue, queue), "testpmd> ", 120, ) for iptype, rsstype in list(iptypes.items()): - self.dut.send_expect("set verbose 8", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set promisc all off", "testpmd> ") - 
self.dut.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") + self.sut_node.send_expect("set verbose 8", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect( "set_hash_global_config 0 simple_xor %s enable" % iptype, "testpmd> " ) - self.dut.send_expect("set_sym_hash_ena_per_port 0 enable", "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("set_sym_hash_ena_per_port 0 enable", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") - out = self.dut.send_expect("port config all rss %s" % rsstype, "testpmd> ") + out = self.sut_node.send_expect("port config all rss %s" % rsstype, "testpmd> ") self.verify( "error" not in out, "Configuration of RSS hash failed: Invalid argument" ) # configure the reta with specific mappings. 
for i in range(reta_num): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> " ) self.send_packet_symmetric(itf, iptype) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def tear_down(self): """ diff --git a/tests/TestSuite_pmdrssreta.py b/tests/TestSuite_pmdrssreta.py index c4bc1ee4..0f3624ac 100644 --- a/tests/TestSuite_pmdrssreta.py +++ b/tests/TestSuite_pmdrssreta.py @@ -31,11 +31,11 @@ class TestPmdrssreta(TestCase): """ global reta_lines - self.tester.scapy_foreground() - self.tester.scapy_append('sys.path.append("./")') - self.tester.scapy_append("from sctp import *") - self.dut.send_expect("start", "testpmd>") - mac = self.dut.get_mac_address(0) + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('sys.path.append("./")') + self.tg_node.scapy_append("from sctp import *") + self.sut_node.send_expect("start", "testpmd>") + mac = self.sut_node.get_mac_address(0) # send packet with different source and dest ip if tran_type == "IPV4": for i in range(16): @@ -43,8 +43,8 @@ class TestPmdrssreta(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IP(src="192.168.0.%d", dst="192.168.0.%d")], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "IPV4&TCP": for i in range(16): @@ -52,8 +52,8 @@ class TestPmdrssreta(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IP(src="192.168.0.%d", dst="192.168.0.%d")/TCP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "IPV4&UDP": for i in range(16): @@ -61,8 +61,8 @@ class TestPmdrssreta(TestCase): 
r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IP(src="192.168.0.%d", dst="192.168.0.%d")/UDP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "IPV6": for i in range(16): @@ -70,8 +70,8 @@ class TestPmdrssreta(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "IPV6&TCP": for i in range(16): @@ -79,8 +79,8 @@ class TestPmdrssreta(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")/TCP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "IPV6&UDP": for i in range(16): @@ -88,13 +88,13 @@ class TestPmdrssreta(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")/UDP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) else: print("\ntran_type error!\n") - out = self.dut.get_session_output(timeout=1) - self.dut.send_expect("stop", "testpmd>") + out = self.sut_node.get_session_output(timeout=1) + self.sut_node.send_expect("stop", "testpmd>") lines = out.split("\r\n") reta_line = {} @@ -182,14 +182,14 @@ class TestPmdrssreta(TestCase): """ Run at the start of each test suite. 
""" - cores = self.dut.get_core_list("all") + cores = self.sut_node.get_core_list("all") self.coremask = utils.create_mask(cores) - ports = self.dut.get_ports(self.nic) - self.ports_socket = self.dut.get_numa_id(ports[0]) + ports = self.sut_node.get_ports(self.nic) + self.ports_socket = self.sut_node.get_numa_id(ports[0]) self.verify(len(ports) >= 1, "Not enough ports available") - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) def set_up(self): """ @@ -198,9 +198,9 @@ class TestPmdrssreta(TestCase): pass def test_pmdrss_reta(self): - dutPorts = self.dut.get_ports(self.nic) - localPort = self.tester.get_local_port(dutPorts[0]) - itf = self.tester.get_interface(localPort) + sutPorts = self.sut_node.get_ports(self.nic) + localPort = self.tg_node.get_local_port(sutPorts[0]) + itf = self.tg_node.get_interface(localPort) iptypes = { "IPV4": "ip", "IPV4&UDP": "udp", @@ -210,7 +210,7 @@ class TestPmdrssreta(TestCase): "IPV6&TCP": "tcp", } - self.dut.kill_all() + self.sut_node.kill_all() global testQueues if self.nic == "IGC-I225_LM": testQueues = [2] @@ -231,11 +231,11 @@ class TestPmdrssreta(TestCase): ) for iptype, rsstype in list(iptypes.items()): - self.dut.send_expect("set verbose 8", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") + self.sut_node.send_expect("set verbose 8", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "port config all rss %s" % rsstype, "testpmd> " ) self.verify( @@ -247,35 +247,35 @@ class TestPmdrssreta(TestCase): if self.nic in ["cavium_a063", "cavium_a064"]: for i in range(64): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> ", ) elif self.nic in 
["hi1822"]: for i in range(256): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> ", ) elif self.nic in ["IXGBE_10G-82599_SFP", "IGC-I225_LM"]: for i in range(128): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> ", ) else: for i in range(512): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> ", ) self.send_packet(itf, iptype) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def test_rss_key_size(self): nic_rss_key_size = { @@ -303,15 +303,15 @@ class TestPmdrssreta(TestCase): "Not supporte rss key on %s" % self.nic, ) - dutPorts = self.dut.get_ports(self.nic) - localPort = self.tester.get_local_port(dutPorts[0]) - itf = self.tester.get_interface(localPort) - self.dut.kill_all() + sutPorts = self.sut_node.get_ports(self.nic) + localPort = self.tg_node.get_local_port(sutPorts[0]) + itf = self.tg_node.get_interface(localPort) + self.sut_node.kill_all() self.pmdout.start_testpmd("all", "--rxq=2 --txq=2") - self.dut.send_expect("start", "testpmd> ", 120) - out = self.dut.send_expect("show port info all", "testpmd> ", 120) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("start", "testpmd> ", 120) + out = self.sut_node.send_expect("show port info all", "testpmd> ", 120) + self.sut_node.send_expect("quit", "# ", 30) pattern = re.compile("Hash key size in bytes:\s(\d+)") m = pattern.search(out) @@ -339,4 +339,4 @@ class TestPmdrssreta(TestCase): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_port_control.py b/tests/TestSuite_port_control.py index d571b01f..a5e806b6 100644 --- a/tests/TestSuite_port_control.py +++ b/tests/TestSuite_port_control.py @@ -6,7 +6,7 @@ import os import re import time -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils from framework.pmd_output import PmdOutput from framework.test_case import TestCase @@ -23,19 +23,19 @@ class TestPortControl(TestCase): self.env_done = False self.port_id_0 = 0 self.pkt_count = 1000 - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.pf_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.pf_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.vf_mac = "00:01:23:45:67:89" - self.txitf = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.txitf = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - self.host_testpmd = PmdOutput(self.dut) + self.host_testpmd = PmdOutput(self.sut_node) self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") - self.socket = self.dut.get_numa_id(self.dut_ports[0]) - port = self.dut.ports_info[0]["port"] + self.sut_node.send_expect("modprobe vfio-pci", "#") + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) + port = self.sut_node.ports_info[0]["port"] self.pf_default_driver = port.get_nic_driver() def set_up(self): @@ -52,11 +52,11 @@ class TestPortControl(TestCase): return # bind to default driver - self.bind_nic_driver(self.dut_ports[:1], driver="") - self.used_dut_port = self.dut_ports[0] - self.host_intf = self.dut.ports_info[self.used_dut_port]["intf"] - 
self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.bind_nic_driver(self.sut_ports[:1], driver="") + self.used_sut_port = self.sut_ports[0] + self.host_intf = self.sut_node.ports_info[self.used_sut_port]["intf"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] try: for port in self.sriov_vfs_port: port.bind_driver(self.vf_assign_method) @@ -64,15 +64,15 @@ class TestPortControl(TestCase): vf_popt = {"opt_host": self.sriov_vfs_port[0].pci} # set up VM ENV - self.vm = VM(self.dut, "vm0", "port_control") + self.vm = VM(self.sut_node, "vm0", "port_control") self.vm.set_vm_device(driver=self.vf_assign_method, **vf_popt) - self.vm_dut = self.vm.start() - if self.vm_dut is None: + self.vm_sut = self.vm.start() + if self.vm_sut is None: raise Exception("Set up VM ENV failed!") else: - self.start_vf_pmd(self.vm_dut) + self.start_vf_pmd(self.vm_sut) - self.vm_testpmd = PmdOutput(self.vm_dut) + self.vm_testpmd = PmdOutput(self.vm_sut) except Exception as e: self.destroy_vm_env() @@ -82,20 +82,20 @@ class TestPortControl(TestCase): def destroy_vm_env(self): if getattr(self, "vm", None): - if getattr(self, "vm_dut", None): - self.vm_dut.kill_all() + if getattr(self, "vm_sut", None): + self.vm_sut.kill_all() self.vm_testpmd = None - self.vm_dut_ports = None + self.vm_sut_ports = None # destroy vm0 self.vm.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() time.sleep(3) self.vm = None - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - self.used_dut_port = None - self.bind_nic_driver(self.dut_ports[:1], driver=self.pf_default_driver) + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + self.used_sut_port = None + 
self.bind_nic_driver(self.sut_ports[:1], driver=self.pf_default_driver) self.env_done = False @@ -175,11 +175,11 @@ class TestPortControl(TestCase): else: self.dts_mac = self.vf_mac - self.pkt = packet.Packet('Ether(dst="%s")/IP()/Raw("x"*40)' % self.dts_mac) + self.scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder('Ether(dst="%s")/IP()/Raw("x"*40)' % self.dts_mac) pf_start_stats = terminal.get_pmd_stats(self.port_id_0) - self.pkt.send_pkt( - crb=self.tester, tx_port=self.txitf, count=self.pkt_count, timeout=30 + self.scapy_pkt_builder.send_pkt( + node=self.tg_node, tx_port=self.txitf, count=self.pkt_count, timeout=30 ) pf_end_stats = terminal.get_pmd_stats(self.port_id_0) pf_ret_stats = self.calculate_stats(pf_start_stats, pf_end_stats) @@ -239,4 +239,4 @@ class TestPortControl(TestCase): """ if self.env_done: self.destroy_vm_env() - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_port_representor.py b/tests/TestSuite_port_representor.py index eb0ddba5..e9dd37f4 100644 --- a/tests/TestSuite_port_representor.py +++ b/tests/TestSuite_port_representor.py @@ -12,9 +12,9 @@ independent on the control plane and data plane. 
import re import time -from framework.dut import Dut -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.sut_node import SutNode from framework.test_case import TestCase @@ -36,35 +36,35 @@ class TestPortRepresentor(TestCase): ], "NIC Unsupported: " + str(self.nic), ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") - self.session_secondary = self.dut.new_session() - self.session_third = self.dut.new_session() - self.pmdout_pf = PmdOutput(self.dut) - self.pmdout_vf0 = PmdOutput(self.dut, self.session_secondary) - self.pmdout_vf1 = PmdOutput(self.dut, self.session_third) + self.session_secondary = self.sut_node.new_session() + self.session_third = self.sut_node.new_session() + self.pmdout_pf = PmdOutput(self.sut_node) + self.pmdout_vf0 = PmdOutput(self.sut_node, self.session_secondary) + self.pmdout_vf1 = PmdOutput(self.sut_node, self.session_third) - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.tester_itf = self.tester.get_interface(localPort) - self.tester_mac = self.tester.get_mac(localPort) - self.pf_mac = self.dut.get_mac_address(0) - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_itf = self.tg_node.get_interface(localPort) + self.tg_mac = self.tg_node.get_mac(localPort) + self.pf_mac = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] self.unicast_mac = "00:11:22:33:44:55" # This is to set up 1pf and 2vfs environment # PF is bound to igb_uio, while VF is bound to vfio-pci. 
- self.dut.generate_sriov_vfs_by_port(self.dut_ports[0], 2, "igb_uio") - self.two_vfs_port = self.dut.ports_info[self.dut_ports[0]]["vfs_port"] - self.dut.send_expect("modprobe vfio-pci", "#", 3) + self.sut_node.generate_sriov_vfs_by_port(self.sut_ports[0], 2, "igb_uio") + self.two_vfs_port = self.sut_node.ports_info[self.sut_ports[0]]["vfs_port"] + self.sut_node.send_expect("modprobe vfio-pci", "#", 3) try: for port in self.two_vfs_port: port.bind_driver(driver="vfio-pci") except Exception as e: self.destroy_env() raise Exception(e) - self.vfs_pci = self.dut.ports_info[self.dut_ports[0]]["sriov_vfs_pci"] + self.vfs_pci = self.sut_node.ports_info[self.sut_ports[0]]["sriov_vfs_pci"] def set_up(self): """ @@ -141,14 +141,14 @@ class TestPortRepresentor(TestCase): self.pmdout_vf1.execute_cmd("set promisc 0 off", "testpmd>") self.pmdout_vf1.execute_cmd("start", "testpmd>", 2) # send 30 packets - pkt1 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.pf_mac) - pkt2 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.vf0_mac) - pkt3 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.vf1_mac) + pkt1 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.pf_mac) + pkt2 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.vf0_mac) + pkt3 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.vf1_mac) pkts = [pkt1, pkt2, pkt3] - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf, count=10) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf, count=10) # check port stats in control testpmd result_before = self.check_port_stats() self.verify( @@ -176,15 +176,15 @@ class TestPortRepresentor(TestCase): # set vf promisc enable and send 40 packets self.pmdout_pf.execute_cmd("set promisc 1 on", "testpmd>") - pkt1 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.pf_mac) - pkt2 = 'Ether(src="%s",dst="%s")/IP()' % 
(self.tester_mac, self.vf0_mac) - pkt3 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.vf1_mac) - pkt4 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.unicast_mac) + pkt1 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.pf_mac) + pkt2 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.vf0_mac) + pkt3 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.vf1_mac) + pkt4 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.unicast_mac) pkts = [pkt1, pkt2, pkt3, pkt4] - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf, count=10) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf, count=10) # check port stats in control testpmd result_enable = self.check_port_stats() self.verify( @@ -194,10 +194,10 @@ class TestPortRepresentor(TestCase): self.clear_port_stats() # set vf promisc disable and send 40 packets self.pmdout_pf.execute_cmd("set promisc 1 off", "testpmd>") - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf, count=10) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf, count=10) # check port stats in control testpmd result_disable = self.check_port_stats() self.verify( @@ -221,15 +221,15 @@ class TestPortRepresentor(TestCase): self.pmdout_vf1.execute_cmd("set promisc 0 off", "testpmd>") self.pmdout_vf1.execute_cmd("start", "testpmd>", 2) # send 40 packets - pkt1 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.pf_mac) - pkt2 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.vf0_mac) - pkt3 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.vf1_mac) - pkt4 = 'Ether(src="%s",dst="%s")/IP()' % (self.tester_mac, self.unicast_mac) + pkt1 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.pf_mac) + pkt2 = 'Ether(src="%s",dst="%s")/IP()' 
% (self.tg_mac, self.vf0_mac) + pkt3 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.vf1_mac) + pkt4 = 'Ether(src="%s",dst="%s")/IP()' % (self.tg_mac, self.unicast_mac) pkts = [pkt1, pkt2, pkt3, pkt4] - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf, count=10) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf, count=10) # check port stats in control testpmd result = self.check_port_stats() self.verify( @@ -254,18 +254,18 @@ class TestPortRepresentor(TestCase): self.pmdout_vf1.execute_cmd("start", "testpmd>", 2) # send 20 packets pkt1 = 'Ether(src="%s",dst="%s")/Dot1Q(vlan=3)/IP()' % ( - self.tester_mac, + self.tg_mac, self.vf0_mac, ) pkt2 = 'Ether(src="%s",dst="%s")/Dot1Q(vlan=4)/IP()' % ( - self.tester_mac, + self.tg_mac, self.vf1_mac, ) pkts = [pkt1, pkt2] - p = Packet() + scapy_pkt_builder = ScapyPacketBuilder() for i in pkts: - p.append_pkt(i) - p.send_pkt(self.tester, tx_port=self.tester_itf, count=10) + scapy_pkt_builder.append_pkt(i) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_itf, count=10) # check port stats in control testpmd result = self.check_port_stats() self.verify( @@ -282,7 +282,7 @@ class TestPortRepresentor(TestCase): """ Run after each test suite. 
""" - self.dut.kill_all() - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[0]) - self.dut.close_session(self.session_secondary) - self.dut.close_session(self.session_third) + self.sut_node.kill_all() + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[0]) + self.sut_node.close_session(self.session_secondary) + self.sut_node.close_session(self.session_third) diff --git a/tests/TestSuite_power_bidirection_channel.py b/tests/TestSuite_power_bidirection_channel.py index a2affd8e..655d61bb 100644 --- a/tests/TestSuite_power_bidirection_channel.py +++ b/tests/TestSuite_power_bidirection_channel.py @@ -21,23 +21,23 @@ class TestPowerBidirectionChannel(TestCase): def target_dir(self): # get absolute directory of target source code target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir def get_cores_mask(self, config="all"): - ports_socket = self.dut.get_numa_id(self.dut.get_ports()[0]) - mask = dts_create_mask(self.dut.get_core_list(config, socket=ports_socket)) + ports_socket = self.sut_node.get_numa_id(self.sut_node.get_ports()[0]) + mask = dts_create_mask(self.sut_node.get_core_list(config, socket=ports_socket)) return mask - def prepare_binary(self, name, host_crb=None): - _host_crb = host_crb if host_crb else self.dut + def prepare_binary(self, name, host_node=None): + _host_node = host_node if host_node else self.sut_node example_dir = "examples/" + name - out = _host_crb.build_dpdk_apps("./" + example_dir) + out = _host_node.build_dpdk_apps("./" + example_dir) return os.path.join( - self.target_dir, _host_crb.apps_name[os.path.basename(name)] + self.target_dir, _host_node.apps_name[os.path.basename(name)] ) def add_console(self, session): @@ -48,10 +48,10 @@ class TestPowerBidirectionChannel(TestCase): def get_console(self, name): default_con_table = { - 
self.dut.session.name: [self.dut.send_expect, self.dut.get_session_output], - self.dut.alt_session.name: [ - self.dut.alt_session.send_expect, - self.dut.alt_session.session.get_output_all, + self.sut_node.session.name: [self.sut_node.send_expect, self.sut_node.get_session_output], + self.sut_node.alt_session.name: [ + self.sut_node.alt_session.send_expect, + self.sut_node.alt_session.session.get_output_all, ], } if name not in default_con_table: @@ -59,7 +59,7 @@ class TestPowerBidirectionChannel(TestCase): else: return default_con_table.get(name) - def execute_cmds(self, cmds, name="dut"): + def execute_cmds(self, cmds, name="sut"): console, msg_pipe = self.get_console(name) if len(cmds) == 0: return @@ -95,13 +95,13 @@ class TestPowerBidirectionChannel(TestCase): return outputs def d_con(self, cmds): - return self.execute_cmds(cmds, name=self.dut.session.name) + return self.execute_cmds(cmds, name=self.sut_node.session.name) def d_a_con(self, cmds): - return self.execute_cmds(cmds, name=self.dut.alt_session.name) + return self.execute_cmds(cmds, name=self.sut_node.alt_session.name) def vm_con(self, cmds): - return self.execute_cmds(cmds, name=self.vm_dut.session.name) + return self.execute_cmds(cmds, name=self.vm_sut.session.name) def vm_g_con(self, cmds): return self.execute_cmds(cmds, name=self.guest_con_name) @@ -131,7 +131,7 @@ class TestPowerBidirectionChannel(TestCase): get all cpus' base_frequency value, if not support pbf, set all to 0 """ if not self.is_support_pbf: - cpu_topos = self.dut.get_all_cores() + cpu_topos = self.sut_node.get_all_cores() _base_freqs_info = {} for index, _ in enumerate(cpu_topos): _base_freqs_info[index] = 0 @@ -139,8 +139,8 @@ class TestPowerBidirectionChannel(TestCase): # if cpu support high priority core key_values = ["base_frequency", "cpuinfo_max_freq", "cpuinfo_min_freq"] freq = r"/sys/devices/system/cpu/cpu{0}/cpufreq/{1}".format - # use dut alt session to get dut platform cpu base frequency attribute - cpu_topos = 
self.dut.get_all_cores() + # use SUT alt session to get SUT platform cpu base frequency attribute + cpu_topos = self.sut_node.get_all_cores() cpu_info = {} for cpu_topo in cpu_topos: cpu_id = int(cpu_topo["thread"]) @@ -184,7 +184,7 @@ class TestPowerBidirectionChannel(TestCase): self.vcpu_map ) = ( self.vcpu_lst - ) = self.vm_dut = self.guest_session = self.is_guest_on = self.is_vm_on = None + ) = self.vm_sut = self.guest_session = self.is_guest_on = self.is_vm_on = None # vm config self.vm_name = "vm0" self.vm_max_ch = 8 @@ -200,9 +200,9 @@ class TestPowerBidirectionChannel(TestCase): # set vm initialize parameters self.init_vms_params() # start vm - self.vm = LibvirtKvm(self.dut, self.vm_name, self.suite_name) + self.vm = LibvirtKvm(self.sut_node, self.vm_name, self.suite_name) # pass pf to virtual machine - pci_addr = self.dut.get_port_pci(self.dut_ports[0]) + pci_addr = self.sut_node.get_port_pci(self.sut_ports[0]) # add channel ch_name = "virtio.serial.port.poweragent.{0}" vm_path = os.path.join(self.vm_log_dir, "{0}.{1}") @@ -213,25 +213,25 @@ class TestPowerBidirectionChannel(TestCase): } self.vm.add_vm_virtio_serial_channel(**channel) # boot up vm - self.vm_dut = self.vm.start() + self.vm_sut = self.vm.start() self.is_vm_on = True - self.verify(self.vm_dut, "create vm_dut fail !") - self.add_console(self.vm_dut.session) + self.verify(self.vm_sut, "create vm_sut fail !") + self.add_console(self.vm_sut.session) # get virtual machine cpu cores _vcpu_map = self.vm.get_vm_cpu() self.vcpu_map = [int(item) for item in _vcpu_map] - self.vcpu_lst = [int(item["core"]) for item in self.vm_dut.cores] + self.vcpu_lst = [int(item["core"]) for item in self.vm_sut.cores] def close_vm(self): # close vm if self.is_vm_on: if self.guest_session: - self.vm_dut.close_session(self.guest_session) + self.vm_sut.close_session(self.guest_session) self.guest_session = None self.vm.stop() self.is_vm_on = False self.vm = None - self.dut.virt_exit() + self.sut_node.virt_exit() cmd_fmt = 
"virsh {0} {1} > /dev/null 2>&1".format cmds = [ [cmd_fmt("shutdown", self.vm_name), "# "], @@ -246,7 +246,7 @@ class TestPowerBidirectionChannel(TestCase): option = (" -v " "-c {core_mask} " "-n {mem_channel} " "--no-pci ").format( **{ "core_mask": self.get_cores_mask("1S/12C/1T"), - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), } ) prompt = "vmpower>" @@ -279,9 +279,9 @@ class TestPowerBidirectionChannel(TestCase): def init_guest_mgr(self): name = "vm_power_manager/guest_cli" - self.guest_cli = self.prepare_binary(name, host_crb=self.vm_dut) - self.guest_con_name = "_".join([self.vm_dut.NAME, name.replace("/", "-")]) - self.guest_session = self.vm_dut.create_session(self.guest_con_name) + self.guest_cli = self.prepare_binary(name, host_node=self.vm_sut) + self.guest_con_name = "_".join([self.vm_sut.NAME, name.replace("/", "-")]) + self.guest_session = self.vm_sut.create_session(self.guest_con_name) self.add_console(self.guest_session) def start_guest_mgr(self): @@ -299,7 +299,7 @@ class TestPowerBidirectionChannel(TestCase): ).format( **{ "core_mask": "0xfe", - "memory_channel": self.vm_dut.get_memory_channels(), + "memory_channel": self.vm_sut.get_memory_channels(), "memory_size": 1024, "file_prefix": "vmpower1", "vm_name": self.vm_name, @@ -348,7 +348,7 @@ class TestPowerBidirectionChannel(TestCase): cmd = "whereis cpupower > /dev/null 2>&1; echo $?" 
output = self.d_a_con(cmd) status = True if output and output.strip() == "0" else False - msg = "cpupower tool have not installed on DUT" + msg = "cpupower tool have not installed on SUT" self.verify(status, msg) def check_policy_command_acked_output(self): @@ -494,7 +494,7 @@ class TestPowerBidirectionChannel(TestCase): def verify_power_driver(self): expected_drv = "acpi-cpufreq" power_drv = self.get_sys_power_driver() - msg = "{0} should work with {1} driver on DUT".format( + msg = "{0} should work with {1} driver on SUT".format( self.suite_name, expected_drv ) self.verify(power_drv == expected_drv, msg) @@ -505,7 +505,7 @@ class TestPowerBidirectionChannel(TestCase): # modprobe msr module to let the application can get the CPU HW info self.d_a_con("modprobe msr") self.d_a_con("cpupower frequency-set -g userspace") - self.dut.init_core_list_uncached_linux() + self.sut_node.init_core_list_uncached_linux() # check if cpu support bpf feature self.base_freqs_info = self.get_all_cpu_attrs() # boot up vm @@ -522,8 +522,8 @@ class TestPowerBidirectionChannel(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Not enough ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Not enough ports") self.check_cpupower_tool() self.verify_power_driver() # prepare testing environment @@ -545,8 +545,8 @@ class TestPowerBidirectionChannel(TestCase): """ Run after each test case. 
""" - self.vm_dut.kill_all() - self.dut.kill_all() + self.vm_sut.kill_all() + self.sut_node.kill_all() def test_policy_command_acked_action(self): """ diff --git a/tests/TestSuite_power_branch_ratio.py b/tests/TestSuite_power_branch_ratio.py index 91454af8..d9356203 100644 --- a/tests/TestSuite_power_branch_ratio.py +++ b/tests/TestSuite_power_branch_ratio.py @@ -15,9 +15,8 @@ from copy import deepcopy from pprint import pformat from framework.exception import VerifyFailure -from framework.packet import Packet -from framework.pktgen import TRANSMIT_CONT -from framework.settings import HEADER_SIZE +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.settings import HEADER_SIZE, TRANSMIT_CONT from framework.test_case import TestCase from framework.utils import create_mask as dts_create_mask @@ -28,24 +27,24 @@ class TestPowerBranchRatio(TestCase): @property def target_dir(self): target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir - def get_cores_mask(self, config="all", crb=None): - _crb = crb if crb else self.dut - ports_socket = 0 if crb else _crb.get_numa_id(_crb.get_ports()[0]) - mask = dts_create_mask(_crb.get_core_list(config, socket=ports_socket)) + def get_cores_mask(self, config="all", node=None): + _node = node if node else self.sut_node + ports_socket = 0 if node else _node.get_numa_id(_node.get_ports()[0]) + mask = dts_create_mask(_node.get_core_list(config, socket=ports_socket)) return mask - def prepare_binary(self, name, host_crb=None): - _host_crb = host_crb if host_crb else self.dut + def prepare_binary(self, name, host_node=None): + _host_node = host_node if host_node else self.sut_node example_dir = "examples/" + name - out = _host_crb.build_dpdk_apps("./" + example_dir) + out = _host_node.build_dpdk_apps("./" + example_dir) return 
os.path.join( - self.target_dir, _host_crb.apps_name[os.path.basename(name)] + self.target_dir, _host_node.apps_name[os.path.basename(name)] ) def add_console(self, session): @@ -56,10 +55,10 @@ class TestPowerBranchRatio(TestCase): def get_console(self, name): default_con_table = { - self.dut.session.name: [self.dut.send_expect, self.dut.get_session_output], - self.dut.alt_session.name: [ - self.dut.alt_session.send_expect, - self.dut.alt_session.session.get_output_all, + self.sut_node.session.name: [self.sut_node.send_expect, self.sut_node.get_session_output], + self.sut_node.alt_session.name: [ + self.sut_node.alt_session.send_expect, + self.sut_node.alt_session.session.get_output_all, ], } if name not in default_con_table: @@ -67,7 +66,7 @@ class TestPowerBranchRatio(TestCase): else: return default_con_table.get(name) - def execute_cmds(self, cmds, name="dut"): + def execute_cmds(self, cmds, name="sut"): if len(cmds) == 0: return if isinstance(cmds, str): @@ -96,11 +95,11 @@ class TestPowerBranchRatio(TestCase): def d_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - return self.dut.send_expect(*_cmd) + return self.sut_node.send_expect(*_cmd) def d_a_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - return self.dut.alt_session.send_expect(*_cmd) + return self.sut_node.alt_session.send_expect(*_cmd) def d_sys_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd @@ -111,8 +110,8 @@ class TestPowerBranchRatio(TestCase): pktlen = frame_size - headers_size return pktlen - def config_stream(self, dut_port_id, stm_name): - dmac = self.dut.get_mac_address(dut_port_id) + def config_stream(self, sut_port_id, stm_name): + dmac = self.sut_node.get_mac_address(sut_port_id) # set streams for traffic pkt_name = "udp" pkt_configs = { @@ -134,22 +133,22 @@ class TestPowerBranchRatio(TestCase): values = pkt_configs[stm_name] pkt_type = values.get("type") pkt_layers = values.get("pkt_layers") - pkt = 
Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - return pkt.pktgen.pkt + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + return scapy_pkt_builder.scapy_pkt_util.pkt def add_stream_to_pktgen(self, txport, rxport, send_pkts, option): stream_ids = [] _option = deepcopy(option) _option["pcap"] = send_pkts[0] - stream_id = self.tester.pktgen.add_stream(txport, rxport, send_pkts[0]) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(txport, rxport, send_pkts[0]) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) _option = deepcopy(option) _option["pcap"] = send_pkts[1] - stream_id = self.tester.pktgen.add_stream(rxport, txport, send_pkts[1]) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(rxport, txport, send_pkts[1]) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) return stream_ids @@ -160,8 +159,8 @@ class TestPowerBranchRatio(TestCase): duration = option.get("duration", 15) send_pkts = option.get("stream") or [] # clear streams before add new streams - self.tester.pktgen.clear_streams() - # set stream into pktgen + self.tg_node.perf_tg.clear_streams() + # set stream into traffic generator s_option = { "stream_config": { "txmode": {}, @@ -177,21 +176,21 @@ class TestPowerBranchRatio(TestCase): "interval": duration - 2, "duration": duration, } - result = self.tester.pktgen.measure(stream_ids, traffic_opt) + result = self.tg_node.perf_tg.measure(stream_ids, traffic_opt) return result def run_traffic(self, option): - tester_tx_port_id = self.tester.get_local_port(self.dut_ports[0]) - tester_rx_port_id = self.tester.get_local_port(self.dut_ports[1]) + tg_tx_port_id = self.tg_node.get_local_port(self.sut_ports[0]) + tg_rx_port_id = 
self.tg_node.get_local_port(self.sut_ports[1]) stm_type = option.get("stm_type") duration = option.get("duration", None) or 15 ports_topo = { - "tx_intf": tester_tx_port_id, - "rx_intf": tester_rx_port_id, + "tx_intf": tg_tx_port_id, + "rx_intf": tg_rx_port_id, "stream": [ - self.config_stream(self.dut_ports[0], stm_type), - self.config_stream(self.dut_ports[1], stm_type), + self.config_stream(self.sut_ports[0], stm_type), + self.config_stream(self.sut_ports[1], stm_type), ], "duration": duration, } @@ -212,7 +211,7 @@ class TestPowerBranchRatio(TestCase): def restore_port_drv(self): driver = self.drivername - for port in self.dut.ports_info: + for port in self.sut_node.ports_info: netdev = port.get("port") if not netdev: continue @@ -230,7 +229,7 @@ class TestPowerBranchRatio(TestCase): ).format( **{ "core_mask": self.get_cores_mask("1S/3C/1T"), - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "memory_size": 1024, } ) @@ -252,11 +251,11 @@ class TestPowerBranchRatio(TestCase): self.d_con(["quit", "# ", 30]) self.is_mgr_on = False - def add_alternative_session_to_dut(self): - self.alt_sys_session = self.dut.create_session("alt_sys_session") + def add_alternative_session_to_sut(self): + self.alt_sys_session = self.sut_node.create_session("alt_sys_session") def init_testpmd(self): - self.testpmd = os.path.join(self.target_dir, self.dut.apps_name["test-pmd"]) + self.testpmd = os.path.join(self.target_dir, self.sut_node.apps_name["test-pmd"]) def start_testpmd(self): cores = [] @@ -273,7 +272,7 @@ class TestPowerBranchRatio(TestCase): ).format( **{ "core_mask": core_mask, - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "memsize": 1024, "file-prefix": "vmpower2", } @@ -322,7 +321,7 @@ class TestPowerBranchRatio(TestCase): """get all cpus' attribute""" cpu_attrs = ["cpuinfo_max_freq", "cpuinfo_min_freq"] freq = "/sys/devices/system/cpu/cpu{0}/cpufreq/{1}".format - 
cpu_topos = self.dut.get_all_cores() + cpu_topos = self.sut_node.get_all_cores() cpu_info = {} for cpu_topo in cpu_topos: cpu_id = int(cpu_topo["thread"]) @@ -381,7 +380,7 @@ class TestPowerBranchRatio(TestCase): check the branch miss ration and the related CPU frequency, the core used by testpmd as worker core will be shown as branch ratio value. """ - output = self.dut.get_session_output(timeout=2) + output = self.sut_node.get_session_output(timeout=2) msg = "virtual machine testpmd has not output message" self.verify(output, msg) pat = ".*\s+(\d+): ([0-9\.]+) \{(\d+)\} \{(\d+)\}.*" @@ -446,7 +445,7 @@ class TestPowerBranchRatio(TestCase): def verify_power_driver(self): expected_drv = "intel_pstate" power_drv = self.get_sys_power_driver() - msg = "{0} should work with {1} driver on DUT".format( + msg = "{0} should work with {1} driver on SUT".format( self.suite_name, expected_drv ) self.verify(power_drv == expected_drv, msg) @@ -455,7 +454,7 @@ class TestPowerBranchRatio(TestCase): cmd = "whereis cpupower > /dev/null 2>&1; echo $?" 
output = self.d_a_con(cmd) status = True if output and output.strip() == "0" else False - msg = "cpupower tool have not installed on DUT" + msg = "cpupower tool have not installed on SUT" self.verify(status, msg) def init_params(self): @@ -465,7 +464,7 @@ class TestPowerBranchRatio(TestCase): self.branch_ratio = None def preset_test_environment(self): - self.dut.init_core_list_uncached_linux() + self.sut_node.init_core_list_uncached_linux() self.cpu_info = self.get_all_cpu_attrs() # modprobe msr module to let the application can get the CPU HW info self.d_a_con("modprobe msr") @@ -473,7 +472,7 @@ class TestPowerBranchRatio(TestCase): # init binary self.init_vm_power_mgr() self.init_testpmd() - self.add_alternative_session_to_dut() + self.add_alternative_session_to_sut() test_content = self.get_suite_cfg() self.frame_size = test_content.get("frame_size") or 1024 self.check_ratio = test_content.get("check_ratio") or 0.1 @@ -481,7 +480,7 @@ class TestPowerBranchRatio(TestCase): self.to_core = test_content.get("to_core") self.check_core = test_content.get("check_core") self.testpmd_cores = test_content.get("testpmd_cores") - msg = "select dut core {} as check core".format(self.check_core) + msg = "select SUT core {} as check core".format(self.check_core) self.logger.info(msg) # @@ -493,8 +492,8 @@ class TestPowerBranchRatio(TestCase): Run at the start of each test suite. """ self.init_params() - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Not enough ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Not enough ports") self.check_cpupower_tool() self.verify_power_driver() # prepare testing environment @@ -516,7 +515,7 @@ class TestPowerBranchRatio(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() def test_perf_set_branch_ratio_rate_by_user(self): """ diff --git a/tests/TestSuite_power_empty_poll.py b/tests/TestSuite_power_empty_poll.py index 6db9d054..636918bb 100644 --- a/tests/TestSuite_power_empty_poll.py +++ b/tests/TestSuite_power_empty_poll.py @@ -13,9 +13,8 @@ from copy import deepcopy from pprint import pformat from framework.exception import VerifyFailure -from framework.packet import Packet -from framework.pktgen import TRANSMIT_CONT -from framework.settings import HEADER_SIZE, PKTGEN_TREX +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.settings import HEADER_SIZE, TG_TREX, TRANSMIT_CONT from framework.test_case import TestCase from framework.utils import create_mask as dts_create_mask @@ -30,32 +29,32 @@ class TestPowerEmptyPoll(TestCase): def target_dir(self): # get absolute directory of target source code target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir @property def is_use_trex(self): return ( - hasattr(self.tester, "is_pktgen") - and self.tester.is_pktgen - and self.tester.pktgen.pktgen_type == PKTGEN_TREX + hasattr(self.tg_node, "is_pktgen") + and self.tg_node.uses_perf_tg + and self.tg_node.perf_tg.tg_type == TG_TREX ) def d_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - return self.dut.send_expect(*_cmd) + return self.sut_node.send_expect(*_cmd) def d_a_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - return self.dut.alt_session.send_expect(*_cmd) + return self.sut_node.alt_session.send_expect(*_cmd) def prepare_binary(self, name): example_dir = "examples/" + name - out = self.dut.build_dpdk_apps("./" + example_dir) - return os.path.join(self.target_dir, self.dut.apps_name[os.path.basename(name)]) + out = 
self.sut_node.build_dpdk_apps("./" + example_dir) + return os.path.join(self.target_dir, self.sut_node.apps_name[os.path.basename(name)]) def get_cores_mask(self, cores_list): return dts_create_mask(cores_list) @@ -66,26 +65,26 @@ class TestPowerEmptyPoll(TestCase): for pkt in send_pkts: _option = deepcopy(option) _option["pcap"] = pkt - stream_id = self.tester.pktgen.add_stream(txport, rxport, pkt) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(txport, rxport, pkt) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) # rxport -> txport - stream_id = self.tester.pktgen.add_stream(rxport, txport, pkt) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(rxport, txport, pkt) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) cnt += 1 return stream_ids def run_traffic(self, option): - txport = self.tester.get_local_port(self.dut_ports[0]) - rxport = self.tester.get_local_port(self.dut_ports[1]) + txport = self.tg_node.get_local_port(self.sut_ports[0]) + rxport = self.tg_node.get_local_port(self.sut_ports[1]) stm_type = option.get("stm_types") rate_percent = option.get("rate", float(100)) duration = option.get("duration", 10) send_pkts = self.set_stream(stm_type) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # set stream into pktgen + self.tg_node.perf_tg.clear_streams() + # set stream into traffic generator s_option = { "stream_config": { "txmode": {}, @@ -96,8 +95,8 @@ class TestPowerEmptyPoll(TestCase): stream_ids = self.add_stream_to_pktgen(txport, rxport, send_pkts, s_option) # run traffic options traffic_opt = option.get("traffic_opt") - # run pktgen(ixia/trex) traffic - result = self.tester.pktgen.measure(stream_ids, traffic_opt) + # run traffic generator (ixia/trex) traffic + result = self.tg_node.perf_tg.measure(stream_ids, traffic_opt) return result @@ -128,10 
+127,10 @@ class TestPowerEmptyPoll(TestCase): values = pkt_configs[stm_name] pkt_type = values.get("type") pkt_layers = values.get("pkt_layers") - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - streams.append(pkt.pktgen.pkt) + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + streams.append(scapy_pkt_builder.scapy_pkt_util.pkt) return streams @@ -162,7 +161,7 @@ class TestPowerEmptyPoll(TestCase): **{ "core": core[-1], "core_mask": self.get_cores_mask(core), - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "empty-poll": train_mode, } ) @@ -221,8 +220,8 @@ class TestPowerEmptyPoll(TestCase): """get all cpus' base_frequency value""" key_values = ["base_frequency", "cpuinfo_max_freq", "cpuinfo_min_freq"] freq = "/sys/devices/system/cpu/cpu{0}/cpufreq/{1}".format - # use dut alt session to get dut platform cpu base frequency attribute - cpu_topos = self.dut.get_all_cores() + # use SUT alt session to get SUT platform cpu base frequency attribute + cpu_topos = self.sut_node.get_all_cores() cpu_info = {} for cpu_topo in cpu_topos: cpu_id = int(cpu_topo["thread"]) @@ -299,7 +298,7 @@ class TestPowerEmptyPoll(TestCase): self.logger.info(msg.format(core_index, expected_freq)) def check_no_train(self): - output = self.dut.get_session_output(timeout=2) + output = self.sut_node.get_session_output(timeout=2) msg = "training steps should not be executed" self.verify("POWER: Training is Complete" not in output, msg) @@ -361,7 +360,7 @@ class TestPowerEmptyPoll(TestCase): def verify_power_driver(self): expected_drv = "intel_pstate" power_drv = self.get_sys_power_driver() - msg = "{0} should work with {1} driver on DUT".format( + msg = "{0} should work with {1} driver on SUT".format( self.suite_name, expected_drv ) self.verify(power_drv == expected_drv, msg) @@ -373,7 +372,7 @@ class 
TestPowerEmptyPoll(TestCase): def verify_pbf_supported(self): if self.is_support_pbf(): return - msg = "dut cpu doesn't support priority base frequency feature" + msg = "SUT cpu doesn't support priority base frequency feature" raise VerifyFailure(msg) def preset_test_environment(self): @@ -399,8 +398,8 @@ class TestPowerEmptyPoll(TestCase): self.verify_power_driver() # check if cpu support bpf feature self.verify_pbf_supported() - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Not enough ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Not enough ports") # prepare testing environment self.preset_test_environment() @@ -420,7 +419,7 @@ class TestPowerEmptyPoll(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def test_perf_basic_train_mode(self): """ diff --git a/tests/TestSuite_power_negative.py b/tests/TestSuite_power_negative.py index 9a24707a..82d87cee 100644 --- a/tests/TestSuite_power_negative.py +++ b/tests/TestSuite_power_negative.py @@ -23,18 +23,18 @@ class TestPowerNegative(TestCase): def target_dir(self): # get absolute directory of target source code target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir - def prepare_binary(self, name, host_crb=None): - _host_crb = host_crb if host_crb else self.dut + def prepare_binary(self, name, host_node=None): + _host_node = host_node if host_node else self.sut_node example_dir = "examples/" + name - out = _host_crb.build_dpdk_apps("./" + example_dir) + out = _host_node.build_dpdk_apps("./" + example_dir) return os.path.join( - self.target_dir, _host_crb.apps_name[os.path.basename(name)] + self.target_dir, _host_node.apps_name[os.path.basename(name)] ) def add_console(self, session): @@ -45,10 
+45,10 @@ class TestPowerNegative(TestCase): def get_console(self, name): default_con_table = { - self.dut.session.name: [self.dut.send_expect, self.dut.get_session_output], - self.dut.alt_session.name: [ - self.dut.alt_session.send_expect, - self.dut.alt_session.session.get_output_all, + self.sut_node.session.name: [self.sut_node.send_expect, self.sut_node.get_session_output], + self.sut_node.alt_session.name: [ + self.sut_node.alt_session.send_expect, + self.sut_node.alt_session.session.get_output_all, ], } if name not in default_con_table: @@ -56,7 +56,7 @@ class TestPowerNegative(TestCase): else: return default_con_table.get(name) - def execute_cmds(self, cmds, name="dut"): + def execute_cmds(self, cmds, name="sut"): console, msg_pipe = self.get_console(name) if len(cmds) == 0: return @@ -92,13 +92,13 @@ class TestPowerNegative(TestCase): return outputs def d_con(self, cmds): - return self.execute_cmds(cmds, name=self.dut.session.name) + return self.execute_cmds(cmds, name=self.sut_node.session.name) def d_a_con(self, cmds): - return self.execute_cmds(cmds, name=self.dut.alt_session.name) + return self.execute_cmds(cmds, name=self.sut_node.alt_session.name) def vm_con(self, cmds): - return self.execute_cmds(cmds, name=self.vm_dut.session.name) + return self.execute_cmds(cmds, name=self.vm_sut.session.name) def vm_g_con(self, cmds): return self.execute_cmds(cmds, name=self.guest_con_name) @@ -128,7 +128,7 @@ class TestPowerNegative(TestCase): get all cpus' base_frequency value, if not support pbf, set all to 0 """ if not self.is_support_pbf: - cpu_topos = self.dut.get_all_cores() + cpu_topos = self.sut_node.get_all_cores() _base_freqs_info = {} for index, _ in enumerate(cpu_topos): _base_freqs_info[index] = 0 @@ -136,8 +136,8 @@ class TestPowerNegative(TestCase): # if cpu support high priority core key_values = ["base_frequency", "cpuinfo_max_freq", "cpuinfo_min_freq"] freq = r"/sys/devices/system/cpu/cpu{0}/cpufreq/{1}".format - # use dut alt session to get dut 
platform cpu base frequency attribute - cpu_topos = self.dut.get_all_cores() + # use SUT alt session to get SUT platform cpu base frequency attribute + cpu_topos = self.sut_node.get_all_cores() cpu_info = {} for cpu_topo in cpu_topos: cpu_id = int(cpu_topo["thread"]) @@ -181,7 +181,7 @@ class TestPowerNegative(TestCase): self.vcpu_map ) = ( self.vcpu_lst - ) = self.vm_dut = self.guest_session = self.is_guest_on = self.is_vm_on = None + ) = self.vm_sut = self.guest_session = self.is_guest_on = self.is_vm_on = None # vm config self.vm_name = "vm0" self.vm_max_ch = 8 @@ -197,9 +197,9 @@ class TestPowerNegative(TestCase): # set vm initialize parameters self.init_vms_params() # start vm - self.vm = LibvirtKvm(self.dut, self.vm_name, self.suite_name) + self.vm = LibvirtKvm(self.sut_node, self.vm_name, self.suite_name) # pass pf to virtual machine - pci_addr = self.dut.get_port_pci(self.dut_ports[0]) + pci_addr = self.sut_node.get_port_pci(self.sut_ports[0]) # add channel ch_name = "virtio.serial.port.poweragent.{0}" vm_path = os.path.join(self.vm_log_dir, "{0}.{1}") @@ -210,25 +210,25 @@ class TestPowerNegative(TestCase): } self.vm.add_vm_virtio_serial_channel(**channel) # boot up vm - self.vm_dut = self.vm.start() + self.vm_sut = self.vm.start() self.is_vm_on = True - self.verify(self.vm_dut, "create vm_dut fail !") - self.add_console(self.vm_dut.session) + self.verify(self.vm_sut, "create vm_sut fail !") + self.add_console(self.vm_sut.session) # get virtual machine cpu cores _vcpu_map = self.vm.get_vm_cpu() self.vcpu_map = [int(item) for item in _vcpu_map] - self.vcpu_lst = [int(item["core"]) for item in self.vm_dut.cores] + self.vcpu_lst = [int(item["core"]) for item in self.vm_sut.cores] def close_vm(self): # close vm if self.is_vm_on: if self.guest_session: - self.vm_dut.close_session(self.guest_session) + self.vm_sut.close_session(self.guest_session) self.guest_session = None self.vm.stop() self.is_vm_on = False self.vm = None - self.dut.virt_exit() + 
self.sut_node.virt_exit() cmd_fmt = "virsh {0} {1} > /dev/null 2>&1".format cmds = [ [cmd_fmt("shutdown", self.vm_name), "# "], @@ -258,9 +258,9 @@ class TestPowerNegative(TestCase): def init_guest_mgr(self): name = "vm_power_manager/guest_cli" - self.guest_cli = self.prepare_binary(name, host_crb=self.vm_dut) - self.guest_con_name = "_".join([self.vm_dut.NAME, name.replace("/", "-")]) - self.guest_session = self.vm_dut.create_session(self.guest_con_name) + self.guest_cli = self.prepare_binary(name, host_node=self.vm_sut) + self.guest_con_name = "_".join([self.vm_sut.NAME, name.replace("/", "-")]) + self.guest_session = self.vm_sut.create_session(self.guest_con_name) self.add_console(self.guest_session) def close_guest_mgr(self): @@ -274,13 +274,13 @@ class TestPowerNegative(TestCase): cmd = "whereis cpupower > /dev/null 2>&1; echo $?" output = self.d_a_con(cmd) status = True if output and output.strip() == "0" else False - msg = "cpupower tool have not installed on DUT" + msg = "cpupower tool have not installed on SUT" self.verify(status, msg) def verify_power_driver(self): expected_drv = "acpi-cpufreq" power_drv = self.get_sys_power_driver() - msg = "{0} should work with {1} driver on DUT".format( + msg = "{0} should work with {1} driver on SUT".format( self.suite_name, expected_drv ) self.verify(power_drv == expected_drv, msg) @@ -291,7 +291,7 @@ class TestPowerNegative(TestCase): # modprobe msr module to let the application can get the CPU HW info self.d_a_con("modprobe msr") self.d_a_con("cpupower frequency-set -g userspace") - self.dut.init_core_list_uncached_linux() + self.sut_node.init_core_list_uncached_linux() # check if cpu support bpf feature self.base_freqs_info = self.get_all_cpu_attrs() # boot up vm @@ -558,13 +558,13 @@ class TestPowerNegative(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Not enough ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Not enough ports") self.check_cpupower_tool() self.verify_power_driver() # prepare testing environment self.preset_test_environment() - self.port_pci = self.dut.get_port_pci(self.dut_ports[0]) + self.port_pci = self.sut_node.get_port_pci(self.sut_ports[0]) def tear_down_all(self): """ @@ -584,8 +584,8 @@ class TestPowerNegative(TestCase): """ self.close_vm_power_mgr() self.close_guest_mgr() - self.vm_dut.kill_all() - self.dut.kill_all() + self.vm_sut.kill_all() + self.sut_node.kill_all() def test_inject_malformed_json_to_fifo_channel(self): """ diff --git a/tests/TestSuite_power_pbf.py b/tests/TestSuite_power_pbf.py index 086a0b87..454fb10a 100644 --- a/tests/TestSuite_power_pbf.py +++ b/tests/TestSuite_power_pbf.py @@ -33,9 +33,9 @@ class TestPowerPbf(TestCase): @property def target_dir(self): target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir @@ -53,15 +53,15 @@ class TestPowerPbf(TestCase): return output_path def get_console(self, name): - if name == "dut": - console = self.dut.send_expect - msg_pipe = self.dut.get_session_output - elif name == "dut_alt": - console = self.dut.alt_session.send_expect - msg_pipe = self.dut.alt_session.session.get_output_all + if name == "sut": + console = self.sut_node.send_expect + msg_pipe = self.sut_node.get_session_output + elif name == "sut_alt": + console = self.sut_node.alt_session.send_expect + msg_pipe = self.sut_node.alt_session.session.get_output_all return console, msg_pipe - def execute_cmds(self, cmds, con_name="dut"): + def execute_cmds(self, cmds, con_name="sut"): console, msg_pipe = self.get_console(con_name) if 
len(cmds) == 0: return @@ -97,22 +97,22 @@ class TestPowerPbf(TestCase): return outputs def d_con(self, cmds): - return self.execute_cmds(cmds, con_name="dut") + return self.execute_cmds(cmds, con_name="sut") def d_a_con(self, cmds): - return self.execute_cmds(cmds, con_name="dut_alt") + return self.execute_cmds(cmds, con_name="sut_alt") def get_cores_mask(self, config="all"): - sockets = [self.dut.get_numa_id(index) for index in self.dut_ports] + sockets = [self.sut_node.get_numa_id(index) for index in self.sut_ports] socket_count = Counter(sockets) port_socket = list(socket_count.keys())[0] if len(socket_count) == 1 else -1 - mask = create_mask(self.dut.get_core_list(config, socket=port_socket)) + mask = create_mask(self.sut_node.get_core_list(config, socket=port_socket)) return mask def prepare_binary(self, name): example_dir = "examples/" + name - out = self.dut.build_dpdk_apps("./" + example_dir) - return os.path.join(self.target_dir, self.dut.apps_name[os.path.basename(name)]) + out = self.sut_node.build_dpdk_apps("./" + example_dir) + return os.path.join(self.target_dir, self.sut_node.apps_name[os.path.basename(name)]) def create_powermonitor_folder(self): cmd = "mkdir -p {0}; chmod 777 {0}".format("/tmp/powermonitor") @@ -121,7 +121,7 @@ class TestPowerPbf(TestCase): def init_test_binary_file(self): self.create_powermonitor_folder() # open debug SW when build dpdk - self.dut.build_install_dpdk( + self.sut_node.build_install_dpdk( self.target, extra_param="-Dc_args=-DRTE_LIBRTE_POWER_DEBUG" ) # set up vm power management binary process setting @@ -179,13 +179,13 @@ class TestPowerPbf(TestCase): "unit": unit, } } - # generate json data file and scp it to dut target source code folder + # generate json data file and scp it to SUT target source code folder json_name = "command_{}.json".format(core_index) json_file = os.sep.join([self.output_path, json_name]) with open(json_file, "w") as fp: json.dump(command, fp, indent=4, separators=(",", ": "), 
sort_keys=True) fp.write(os.linesep) - self.dut.session.copy_file_to(json_file, self.target_dir) + self.sut_node.session.copy_file_to(json_file, self.target_dir) # save a backup json file to retrace test command backup_file = json_file + self.timestamp() shutil.move(json_file, backup_file) @@ -258,8 +258,8 @@ class TestPowerPbf(TestCase): """get all cpus' base_frequency value""" key_values = ["base_frequency", "cpuinfo_max_freq", "cpuinfo_min_freq"] freq = r"/sys/devices/system/cpu/cpu{0}/cpufreq/{1}".format - # use dut alt session to get dut platform cpu base frequency attribute - cpu_topos = self.dut.get_all_cores() + # use SUT alt session to get SUT platform cpu base frequency attribute + cpu_topos = self.sut_node.get_all_cores() cpu_info = {} for cpu_topo in cpu_topos: cpu_id = int(cpu_topo["thread"]) @@ -548,7 +548,7 @@ class TestPowerPbf(TestCase): def verify_pbf_supported(self): if self.is_support_pbf: return - msg = "dut cpu doesn't support power pbf feature" + msg = "SUT cpu doesn't support power pbf feature" raise Exception(msg) def verify_power_driver(self): @@ -567,17 +567,17 @@ class TestPowerPbf(TestCase): """ self.verify_power_driver() # get ports information - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - # get dut node cores information + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + # get SUT node cores information self.d_a_con("modprobe msr") - self.dut.init_core_list_uncached_linux() + self.sut_node.init_core_list_uncached_linux() # check if cpu support bpf feature self.verify_pbf_supported() self.cpu_info, self.base_freqs_info = self.get_all_cpu_attrs() self.logger.info(pformat(self.cpu_info)) self.logger.info(pformat(self.base_freqs_info)) - self.memory_channels = self.dut.get_memory_channels() + self.memory_channels = self.sut_node.get_memory_channels() self.init_test_binary_file() def set_up(self): @@ -590,7 +590,7 @@ class 
TestPowerPbf(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_power_pstate.py b/tests/TestSuite_power_pstate.py index b61ee70d..470e921c 100644 --- a/tests/TestSuite_power_pstate.py +++ b/tests/TestSuite_power_pstate.py @@ -34,9 +34,9 @@ class TestPowerPstate(TestCase): @property def target_dir(self): target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir @@ -54,15 +54,15 @@ class TestPowerPstate(TestCase): return output_path def get_console(self, name): - if name == "dut": - console = self.dut.send_expect - msg_pipe = self.dut.get_session_output - elif name == "dut_alt": - console = self.dut.alt_session.send_expect - msg_pipe = self.dut.alt_session.session.get_output_all + if name == "sut": + console = self.sut_node.send_expect + msg_pipe = self.sut_node.get_session_output + elif name == "sut_alt": + console = self.sut_node.alt_session.send_expect + msg_pipe = self.sut_node.alt_session.session.get_output_all return console, msg_pipe - def execute_cmds(self, cmds, con_name="dut"): + def execute_cmds(self, cmds, con_name="sut"): console, msg_pipe = self.get_console(con_name) if len(cmds) == 0: return @@ -99,21 +99,21 @@ class TestPowerPstate(TestCase): return outputs def d_con(self, cmds): - return self.execute_cmds(cmds, con_name="dut") + return self.execute_cmds(cmds, con_name="sut") def d_a_con(self, cmds): - return self.execute_cmds(cmds, con_name="dut_alt") + return self.execute_cmds(cmds, con_name="sut_alt") def get_cores_mask(self, config="all"): - sockets = [self.dut.get_numa_id(index) for index in self.dut_ports] + sockets = [self.sut_node.get_numa_id(index) for index in self.sut_ports] socket_count = Counter(sockets) port_socket = list(socket_count.keys())[0] if 
len(socket_count) == 1 else -1 - mask = create_mask(self.dut.get_core_list(config, socket=port_socket)) + mask = create_mask(self.sut_node.get_core_list(config, socket=port_socket)) return mask @property def memory_channels(self): - return self.dut.get_memory_channels() + return self.sut_node.get_memory_channels() def create_powermonitor_folder(self): cmd = "mkdir -p {0}; chmod 777 {0}".format("/tmp/powermonitor") @@ -121,8 +121,8 @@ class TestPowerPstate(TestCase): def prepare_binary(self, name): example_dir = "examples/" + name - out = self.dut.build_dpdk_apps("./" + example_dir) - return os.path.join(self.target_dir, self.dut.apps_name[os.path.basename(name)]) + out = self.sut_node.build_dpdk_apps("./" + example_dir) + return os.path.join(self.target_dir, self.sut_node.apps_name[os.path.basename(name)]) def init_test_binary_file(self): self.create_powermonitor_folder() @@ -157,13 +157,13 @@ class TestPowerPstate(TestCase): "unit": unit, } } - # generate json data file and scp it to dut target source code folder + # generate json data file and scp it to SUT target source code folder json_name = "command_{}.json".format(core_index) json_file = os.sep.join([self.output_path, json_name]) with open(json_file, "w") as fp: json.dump(command, fp, indent=4, separators=(",", ": "), sort_keys=True) fp.write(os.linesep) - self.dut.session.copy_file_to(json_file, self.target_dir) + self.sut_node.session.copy_file_to(json_file, self.target_dir) # save a backup json file to retrace test command backup_file = json_file + self.timestamp shutil.move(json_file, backup_file) @@ -224,7 +224,7 @@ class TestPowerPstate(TestCase): """get all cpus' attribute""" key_values = ["cpuinfo_max_freq", "cpuinfo_min_freq"] freq = "/sys/devices/system/cpu/cpu{0}/cpufreq/{1}".format - cpu_topos = self.dut.get_all_cores() + cpu_topos = self.sut_node.get_all_cores() cpu_info = {} for cpu_topo in cpu_topos: cpu_id = int(cpu_topo["thread"]) @@ -326,11 +326,11 @@ class TestPowerPstate(TestCase): Run 
before each test suite """ # get ports information - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.d_a_con("modprobe msr") - # get dut node cores information - self.dut.init_core_list_uncached_linux() + # get SUT node cores information + self.sut_node.init_core_list_uncached_linux() self.cpu_info = self.get_all_cpu_attrs() self.logger.debug(pformat(self.cpu_info)) self.verify_power_driver() diff --git a/tests/TestSuite_power_telemetry.py b/tests/TestSuite_power_telemetry.py index 9c0be5ea..b637b323 100644 --- a/tests/TestSuite_power_telemetry.py +++ b/tests/TestSuite_power_telemetry.py @@ -16,9 +16,8 @@ from copy import deepcopy from pprint import pformat from framework.exception import VerifyFailure -from framework.packet import Packet -from framework.pktgen import TRANSMIT_CONT -from framework.settings import HEADER_SIZE +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.settings import HEADER_SIZE, TRANSMIT_CONT from framework.test_case import TestCase from framework.utils import create_mask as dts_create_mask @@ -30,19 +29,19 @@ class TestPowerTelemetry(TestCase): def target_dir(self): # get absolute directory of target source code target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir def d_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - return self.dut.send_expect(*_cmd) + return self.sut_node.send_expect(*_cmd) def d_a_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, str) else cmd - return self.dut.alt_session.send_expect(*_cmd) + return self.sut_node.alt_session.send_expect(*_cmd) def get_pkt_len(self, pkt_type, frame_size=64): headers_size = 
sum([HEADER_SIZE[x] for x in ["eth", "ip", pkt_type]]) @@ -60,28 +59,28 @@ class TestPowerTelemetry(TestCase): values = pkt_config pkt_type = values.get("type") pkt_layers = values.get("pkt_layers") - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - return pkt.pktgen.pkt + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + return scapy_pkt_builder.scapy_pkt_util.pkt def add_stream_to_pktgen(self, option): stream_ids = [] topos = [[0, 0]] for txport, rxport in topos: _option = deepcopy(option) - dmac = self.dut.get_mac_address(self.dut_ports[txport]) + dmac = self.sut_node.get_mac_address(self.sut_ports[txport]) pkt = self.config_stream(dmac) _option["pcap"] = pkt - stream_id = self.tester.pktgen.add_stream(txport, rxport, pkt) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(txport, rxport, pkt) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) return stream_ids def run_traffic(self, option): # clear streams before add new streams - self.tester.pktgen.clear_streams() - # set stream into pktgen + self.tg_node.perf_tg.clear_streams() + # set stream into traffic generator stream_option = { "stream_config": { "txmode": {}, @@ -90,9 +89,9 @@ class TestPowerTelemetry(TestCase): } } stream_ids = self.add_stream_to_pktgen(stream_option) - # run pktgen traffic + # run traffic generator traffic traffic_opt = option.get("traffic_opt") - result = self.tester.pktgen.measure(stream_ids, traffic_opt) + result = self.tg_node.perf_tg.measure(stream_ids, traffic_opt) self.logger.debug(pformat(traffic_opt)) self.logger.debug(pformat(result)) @@ -100,12 +99,12 @@ class TestPowerTelemetry(TestCase): def prepare_binary(self, name): example_dir = "examples/" + name - out = self.dut.build_dpdk_apps("./" + example_dir) - return os.path.join(self.target_dir, 
self.dut.apps_name[os.path.basename(name)]) + out = self.sut_node.build_dpdk_apps("./" + example_dir) + return os.path.join(self.target_dir, self.sut_node.apps_name[os.path.basename(name)]) def get_cores_mask(self, config): - ports_socket = self.dut.get_numa_id(self.dut.get_ports()[0]) - mask = dts_create_mask(self.dut.get_core_list(config, socket=ports_socket)) + ports_socket = self.sut_node.get_numa_id(self.sut_node.get_ports()[0]) + mask = dts_create_mask(self.sut_node.get_core_list(config, socket=ports_socket)) return mask def init_l3fwd_power(self): @@ -127,7 +126,7 @@ class TestPowerTelemetry(TestCase): **{ "core_mask": core_mask, "core": core, - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), } ) prompt = "L3FWD_POWER: entering main telemetry loop" @@ -192,7 +191,7 @@ class TestPowerTelemetry(TestCase): query_script = os.path.join(self.output_path, fileName) with open(query_script, "w") as fp: fp.write("#! /usr/bin/env python" + os.linesep + str(script_content)) - self.dut.session.copy_file_to(query_script, self.target_dir) + self.sut_node.session.copy_file_to(query_script, self.target_dir) script_file = os.path.join(self.target_dir, fileName) cmd = "chmod 777 {}".format(script_file) self.d_a_con(cmd) @@ -225,7 +224,7 @@ class TestPowerTelemetry(TestCase): msg = "failed to query metric data" self.verify("Get metrics done" in output, msg) dst_file = os.path.join(self.output_path, json_name) - self.dut.session.copy_file_from(json_file, dst_file) + self.sut_node.session.copy_file_from(json_file, dst_file) msg = "failed to get {}".format(json_name) self.verify(os.path.exists(dst_file), msg) with open(dst_file, "r") as fp: @@ -250,7 +249,7 @@ class TestPowerTelemetry(TestCase): cmd = "{0} -j {1} -f {2}".format(self.query_tool, json_file, pipe) output = self.d_a_con(cmd) dst_file = os.path.join(self.output_path, json_name) - self.dut.session.copy_file_from(json_file, dst_file) + 
self.sut_node.session.copy_file_from(json_file, dst_file) def parse_telemetry_query_on_traffic(self): json_name = "telemetry_data_on_traffic.json" @@ -397,7 +396,7 @@ class TestPowerTelemetry(TestCase): def verify_power_driver(self): expected_drv = "acpi-cpufreq" power_drv = self.get_sys_power_driver() - msg = "{0} should work with {1} driver on DUT".format( + msg = "{0} should work with {1} driver on SUT".format( self.suite_name, expected_drv ) self.verify(power_drv == expected_drv, msg) @@ -417,8 +416,8 @@ class TestPowerTelemetry(TestCase): Run at the start of each test suite. """ self.verify_power_driver() - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Not enough ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Not enough ports") # prepare testing environment self.preset_test_environment() @@ -432,7 +431,7 @@ class TestPowerTelemetry(TestCase): def tear_down(self): """Run after each test case.""" - self.dut.kill_all() + self.sut_node.kill_all() def test_perf_telemetry_power_info(self): self.verify_telemetry_power_info() diff --git a/tests/TestSuite_ptpclient.py b/tests/TestSuite_ptpclient.py index c7abdbc8..44a390bb 100644 --- a/tests/TestSuite_ptpclient.py +++ b/tests/TestSuite_ptpclient.py @@ -20,28 +20,28 @@ class TestPtpClient(TestCase): Run at the start of each test suite. IEEE1588 Prerequisites """ - out = self.tester.send_expect("ptp4l -v", "# ") + out = self.tg_node.send_expect("ptp4l -v", "# ") self.verify("command not found" not in out, "ptp4l not install") - dutPorts = self.dut.get_ports() - self.verify(len(dutPorts) > 0, "No ports found for " + self.nic) + sutPorts = self.sut_node.get_ports() + self.verify(len(sutPorts) > 0, "No ports found for " + self.nic) # recompile the package with extra options of support IEEE1588. 
- self.dut.skip_setup = False - self.dut.build_install_dpdk( + self.sut_node.skip_setup = False + self.sut_node.build_install_dpdk( self.target, extra_options="-Dc_args=-DRTE_LIBRTE_IEEE1588" ) # build sample app - out = self.dut.build_dpdk_apps("examples/ptpclient") - self.app_ptpclient_path = self.dut.apps_name["ptpclient"] + out = self.sut_node.build_dpdk_apps("examples/ptpclient") + self.app_ptpclient_path = self.sut_node.apps_name["ptpclient"] self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") self.app_name = self.app_ptpclient_path[ self.app_ptpclient_path.rfind("/") + 1 : ] - port = self.tester.get_local_port(dutPorts[0]) - self.itf0 = self.tester.get_interface(port) - self.eal_para = self.dut.create_eal_parameters() + port = self.tg_node.get_local_port(sutPorts[0]) + self.itf0 = self.tg_node.get_interface(port) + self.eal_para = self.sut_node.create_eal_parameters() def set_up(self): """ @@ -64,7 +64,7 @@ class TestPtpClient(TestCase): self.result_table_print() def kill_ptpclient(self): - self.dut.send_expect("killall %s" % self.app_name, "# ") + self.sut_node.send_expect("killall %s" % self.app_name, "# ") def test_ptpclient(self): """ @@ -72,18 +72,18 @@ class TestPtpClient(TestCase): """ # use the first port on that self.nic if self.nic in ["cavium_a063", "cavium_a064"]: - self.tester.send_expect("ptp4l -i %s -2 -m &" % self.itf0, "ptp4l") + self.tg_node.send_expect("ptp4l -i %s -2 -m &" % self.itf0, "ptp4l") else: - self.tester.send_expect("ptp4l -i %s -2 -m -S &" % self.itf0, "ptp4l") + self.tg_node.send_expect("ptp4l -i %s -2 -m -S &" % self.itf0, "ptp4l") # run ptpclient on the background - self.dut.send_expect( + self.sut_node.send_expect( "./%s %s -- -T 0 -p 0x1 " % (self.app_ptpclient_path, self.eal_para) + "&", "Delta between master and slave", 60, ) time.sleep(3) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.kill_ptpclient() self.verify("T1" 
and "T2" and "T3" and "T4" in out, "T1,T2,T3,T4 clock error") @@ -100,24 +100,24 @@ class TestPtpClient(TestCase): def test_update_system(self): - # set the dut system time - self.dut.send_expect("date -s '2000-01-01 00:00:00'", "# ") - d_time = self.dut.send_expect("date '+%Y-%m-%d %H:%M'", "# ") + # set the SUT system time + self.sut_node.send_expect("date -s '2000-01-01 00:00:00'", "# ") + d_time = self.sut_node.send_expect("date '+%Y-%m-%d %H:%M'", "# ") self.verify(d_time == "2000-01-01 00:00", "set the time error") if self.nic in ["cavium_a063", "cavium_a064"]: - self.tester.send_expect("ptp4l -i %s -2 -m &" % self.itf0, "ptp4l") + self.tg_node.send_expect("ptp4l -i %s -2 -m &" % self.itf0, "ptp4l") else: - self.tester.send_expect("ptp4l -i %s -2 -m -S &" % self.itf0, "ptp4l") + self.tg_node.send_expect("ptp4l -i %s -2 -m -S &" % self.itf0, "ptp4l") # run ptpclient on the background - self.dut.send_expect( + self.sut_node.send_expect( "./%s %s -- -T 1 -p 0x1" % (self.app_ptpclient_path, self.eal_para) + "&", "Delta between master and slave", 60, ) time.sleep(3) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.kill_ptpclient() @@ -133,32 +133,32 @@ class TestPtpClient(TestCase): self.creat_table(Delta_us) - tester_out = self.tester.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") - dut_out = self.dut.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") - # some times, when using data cmdline get dut system time, after kill ptpclient example. + tg_out = self.tg_node.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") + sut_out = self.sut_node.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") + # some times, when using data cmdline get SUT system time, after kill ptpclient example. # the output will include kill process info, at that time need get system time again. 
- if len(dut_out) != len(tester_out): - dut_out = self.dut.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") + if len(sut_out) != len(tg_out): + sut_out = self.sut_node.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") # In rare cases minute may change while getting time. So get time again - if dut_out != tester_out: - tester_out = self.tester.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") - dut_out = self.dut.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") + if sut_out != tg_out: + tg_out = self.tg_node.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") + sut_out = self.sut_node.send_expect("date -u '+%Y-%m-%d %H:%M'", "# ") - self.verify(tester_out == dut_out, "the DUT time synchronous error") + self.verify(tg_out == sut_out, "the SUT time synchronous error") def tear_down(self): """ Run after each test case. """ - self.tester.send_expect("killall ptp4l", "# ") + self.tg_node.send_expect("killall ptp4l", "# ") def tear_down_all(self): """ Run after each test suite. """ # Restore the systime from RTC time. - out = self.dut.send_expect("hwclock", "# ") + out = self.sut_node.send_expect("hwclock", "# ") rtc_time = re.findall(r"(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})", out)[0] - self.dut.send_command('date -s "%s"' % rtc_time, "# ") + self.sut_node.send_command('date -s "%s"' % rtc_time, "# ") # recompile the package without extra options of support IEEE1588. 
- self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) diff --git a/tests/TestSuite_ptype_mapping.py b/tests/TestSuite_ptype_mapping.py index b460e293..cab875c9 100644 --- a/tests/TestSuite_ptype_mapping.py +++ b/tests/TestSuite_ptype_mapping.py @@ -7,8 +7,8 @@ import time import framework.utils as utils from framework.exception import VerifyFailure -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -32,15 +32,15 @@ class TestPtype_Mapping(TestCase): ], "ptype mapping test can not support %s nic" % self.nic, ) - ports = self.dut.get_ports() + ports = self.sut_node.get_ports() self.verify(len(ports) >= 1, "Insufficient ports for testing") - valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] - self.dut_port = valports[0] - tester_port = self.tester.get_local_port(self.dut_port) - self.tester_iface = self.tester.get_interface(tester_port) + valports = [_ for _ in ports if self.tg_node.get_local_port(_) != -1] + self.sut_port = valports[0] + tg_port = self.tg_node.get_local_port(self.sut_port) + self.tg_iface = self.tg_node.get_interface(tg_port) if self.nic not in ["cavium_a063", "cavium_a064"]: - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e '" + '/printf(" - VLAN tci=0x%x", mb->vlan_tci);' + '/a\\\\t\\tprintf(" - pktype: 0x%x", mb->packet_type);\'' @@ -50,17 +50,17 @@ class TestPtype_Mapping(TestCase): verify=True, ) - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.build_install_dpdk(self.sut_node.target) def set_up(self): """ Run before each test case. 
""" - self.dut_testpmd = PmdOutput(self.dut) - self.dut_testpmd.start_testpmd("Default", "--port-topology=chained") - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd = PmdOutput(self.sut_node) + self.sut_testpmd.start_testpmd("Default", "--port-topology=chained") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") def run_test(self, sw_ptype, pkt_types, chk_types): """ @@ -72,9 +72,9 @@ class TestPtype_Mapping(TestCase): pkt_names = chk_types[pkt_type] else: pkt_names = pkt_types[pkt_type] - pkt = Packet(pkt_type=pkt_type) - pkt.send_pkt(self.tester, tx_port=self.tester_iface, count=4) - out = self.dut.get_session_output(timeout=2) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_iface, count=4) + out = self.sut_node.get_session_output(timeout=2) if sw_ptype != None: self.verify(sw_ptype in out, "Failed to detect correct ptype value") for pkt_layer_name in pkt_names: @@ -104,7 +104,7 @@ class TestPtype_Mapping(TestCase): Get ptype mapping table and run ptype test. 
""" if self.nic in ["cavium_a063", "cavium_a064"]: - out = self.dut_testpmd.execute_cmd("show port 0 ptypes") + out = self.sut_testpmd.execute_cmd("show port 0 ptypes") ptype_list = [ "L2_ETHER", "L3_IPV4", @@ -159,10 +159,10 @@ class TestPtype_Mapping(TestCase): } self.run_test(None, pktType, check_ptype) else: - out = self.dut_testpmd.execute_cmd("ptype mapping get 0 0") + out = self.sut_testpmd.execute_cmd("ptype mapping get 0 0") time.sleep(3) self.verify("255" in out, "Failed to get 255 items ptype mapping table!!!") - out = self.dut_testpmd.execute_cmd("ptype mapping get 0 1") + out = self.sut_testpmd.execute_cmd("ptype mapping get 0 1") time.sleep(3) self.verify("166" in out, "Failed to get 166 items ptype mapping table!!!") sw_ptype = self.strip_ptype(out, hw_ptype) @@ -209,7 +209,7 @@ class TestPtype_Mapping(TestCase): Reset packet mapping table after changing table. """ self.ptype_mapping_test() - self.dut_testpmd.execute_cmd("ptype mapping update 0 38 0x026010e1") + self.sut_testpmd.execute_cmd("ptype mapping update 0 38 0x026010e1") chk_types = { "MAC_IP_IPv6_UDP_PKT": [ "L2_ETHER", @@ -228,7 +228,7 @@ class TestPtype_Mapping(TestCase): ], } self.ptype_mapping_test(check_ptype=chk_types) - self.dut_testpmd.execute_cmd("ptype mapping reset 0") + self.sut_testpmd.execute_cmd("ptype mapping reset 0") self.ptype_mapping_test() def test_ptype_mapping_update(self): @@ -237,8 +237,8 @@ class TestPtype_Mapping(TestCase): """ self.ptype_mapping_test() - self.dut_testpmd.execute_cmd("ptype mapping update 0 38 0x026010e1") - self.dut_testpmd.execute_cmd("ptype mapping update 0 75 0x026010e1") + self.sut_testpmd.execute_cmd("ptype mapping update 0 38 0x026010e1") + self.sut_testpmd.execute_cmd("ptype mapping update 0 75 0x026010e1") check_types = [ "L2_ETHER", "L3_IPV6_EXT_UNKNOWN", @@ -252,7 +252,7 @@ class TestPtype_Mapping(TestCase): "MAC_IP_NVGRE_MAC_VLAN_IP_PKT": check_types, } self.ptype_mapping_test(check_ptype=chk_types) - 
self.dut_testpmd.execute_cmd("ptype mapping reset 0") + self.sut_testpmd.execute_cmd("ptype mapping reset 0") self.ptype_mapping_test() def test_ptype_mapping_replace(self): @@ -260,8 +260,8 @@ class TestPtype_Mapping(TestCase): Replace a specific or a group of software defined ptypes with a new one. """ self.ptype_mapping_test() - self.dut_testpmd.execute_cmd("ptype mapping replace 0 0x06426091 0 0x06421091") - self.dut_testpmd.execute_cmd("ptype mapping update 0 38 0x06421091") + self.sut_testpmd.execute_cmd("ptype mapping replace 0 0x06426091 0 0x06421091") + self.sut_testpmd.execute_cmd("ptype mapping update 0 38 0x06421091") check_types = [ "L2_ETHER", "L3_IPV4_EXT_UNKNOWN", @@ -276,7 +276,7 @@ class TestPtype_Mapping(TestCase): "MAC_IP_NVGRE_MAC_VLAN_IP_PKT": check_types, } self.ptype_mapping_test(check_ptype=chk_types) - self.dut_testpmd.execute_cmd("ptype mapping replace 0 0x06421091 1 0x02601091") + self.sut_testpmd.execute_cmd("ptype mapping replace 0 0x06421091 1 0x02601091") check_types = [ "L2_ETHER", "L3_IPV4_EXT_UNKNOWN", @@ -290,26 +290,26 @@ class TestPtype_Mapping(TestCase): "MAC_IP_NVGRE_MAC_VLAN_IP_PKT": check_types, } self.ptype_mapping_test(check_ptype=chk_types) - self.dut_testpmd.execute_cmd("ptype mapping reset 0") + self.sut_testpmd.execute_cmd("ptype mapping reset 0") self.ptype_mapping_test() def tear_down(self): """ Run after each test case. """ - self.dut_testpmd.quit() + self.sut_testpmd.quit() def tear_down_all(self): """ Run after each test suite. 
""" if self.nic not in ["cavium_a063", "cavium_a064"]: - self.dut.send_expect( + self.sut_node.send_expect( 'sed -i \'/printf(" - pktype: 0x%x", ' + "mb->packet_type);/d' app/test-pmd/util.c", "# ", 30, verify=True, ) - self.dut.build_install_dpdk(self.dut.target) - self.dut.kill_all() + self.sut_node.build_install_dpdk(self.sut_node.target) + self.sut_node.kill_all() diff --git a/tests/TestSuite_pvp_diff_qemu_version.py b/tests/TestSuite_pvp_diff_qemu_version.py index c62a22a9..fe7bee2a 100644 --- a/tests/TestSuite_pvp_diff_qemu_version.py +++ b/tests/TestSuite_pvp_diff_qemu_version.py @@ -17,30 +17,30 @@ import time from scapy.utils import wrpcap import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM class TestVhostPVPDiffQemuVersion(TestCase): def set_up_all(self): # Get and verify the ports - self.dut_ports = self.dut.get_ports() - self.pf = self.dut_ports[0] + self.sut_ports = self.sut_node.get_ports() + self.pf = self.sut_ports[0] # Get the port's socket - netdev = self.dut.ports_info[self.pf]["port"] + netdev = self.sut_node.ports_info[self.pf]["port"] self.socket = netdev.get_nic_socket() self.cores_num = len( - [n for n in self.dut.cores if int(n["socket"]) == self.socket] + [n for n in self.sut_node.cores if int(n["socket"]) == self.socket] ) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.verify( self.cores_num >= 3, "There has not enought cores to test this suite" ) - self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.socket) - self.vm_dut = None + self.cores = self.sut_node.get_core_list("1S/3C/1T", socket=self.socket) + self.vm_sut = None self.packet_params_set() self.logger.info( @@ -55,25 +55,25 @@ class 
TestVhostPVPDiffQemuVersion(TestCase): self.verify(res is True, "The path of qemu version in config file not right") self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.pci_info = self.dut.ports_info[0]["pci"] + self.pktgen_helper = TrafficGeneratorStream() + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.pci_info = self.sut_node.ports_info[0]["pci"] self.number_of_ports = 1 - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] def set_up(self): """ Run before each test case. 
""" - self.vhost = self.dut.new_session(suite="vhost-user") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -I qemu-system-x86_64", "#", 20) + self.vhost = self.sut_node.new_session(suite="vhost-user") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -I qemu-system-x86_64", "#", 20) def packet_params_set(self): self.frame_sizes = [64, 128, 256, 512, 1024, 1500] @@ -115,7 +115,7 @@ class TestVhostPVPDiffQemuVersion(TestCase): """ verify the config has config enough qemu version """ - self.vm = VM(self.dut, "vm0", self.suite_name) + self.vm = VM(self.sut_node, "vm0", self.suite_name) self.vm.load_config() # get qemu version list from config file self.get_qemu_list_from_config() @@ -123,22 +123,22 @@ class TestVhostPVPDiffQemuVersion(TestCase): for i in range(qemu_num): qemu_path = self.qemu_list[i]["path"] - out = self.dut.send_expect("ls %s" % qemu_path, "#") + out = self.sut_node.send_expect("ls %s" % qemu_path, "#") if "No such file or directory" in out: self.logger.error( - "No emulator [ %s ] on the DUT [ %s ]" - % (qemu_path, self.dut.get_ip_address()) + "No emulator [ %s ] on the SUT [ %s ]" + % (qemu_path, self.sut_node.get_ip_address()) ) return False - out = self.dut.send_expect("[ -x %s ];echo $?" % qemu_path, "# ") + out = self.sut_node.send_expect("[ -x %s ];echo $?" 
% qemu_path, "# ") if out != "0": self.logger.error( - "Emulator [ %s ] not executable on the DUT [ %s ]" - % (qemu_path, self.dut.get_ip_address()) + "Emulator [ %s ] not executable on the SUT [ %s ]" + % (qemu_path, self.sut_node.get_ip_address()) ) return False - out = self.dut.send_expect("%s --version" % qemu_path, "#") + out = self.sut_node.send_expect("%s --version" % qemu_path, "#") result = re.search("QEMU\s*emulator\s*version\s*(\d*.\d*)", out) version = result.group(1) # update the version info to self.qemu_list @@ -171,7 +171,7 @@ class TestVhostPVPDiffQemuVersion(TestCase): """ start vm """ - self.vm = VM(self.dut, "vm0", "pvp_diff_qemu_version") + self.vm = VM(self.sut_node, "vm0", "pvp_diff_qemu_version") vm_params = {} vm_params["driver"] = "vhost-user" vm_params["opt_path"] = "%s/vhost-net" % self.base_dir @@ -188,8 +188,8 @@ class TestVhostPVPDiffQemuVersion(TestCase): # Due to we have change the params info before, # so need to start vm with load_config=False try: - self.vm_dut = self.vm.start(load_config=False) - if self.vm_dut is None: + self.vm_sut = self.vm.start(load_config=False) + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -199,7 +199,7 @@ class TestVhostPVPDiffQemuVersion(TestCase): Launch the vhost testpmd """ vdev = [r"'eth_vhost0,iface=%s/vhost-net,queues=1'" % self.base_dir] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.cores, prefix="vhost", ports=[self.pci_info], vdevs=vdev ) para = " -- -i --nb-cores=1 --txd=1024 --rxd=1024" @@ -212,13 +212,13 @@ class TestVhostPVPDiffQemuVersion(TestCase): """ Start testpmd in vm """ - if self.vm_dut is not None: + if self.vm_sut is not None: vm_testpmd = ( self.path + " -c 0x3 -n 3" + " -- -i --nb-cores=1 --txd=1024 --rxd=1024" ) - self.vm_dut.send_expect(vm_testpmd, "testpmd> ", 20) - self.vm_dut.send_expect("set fwd mac", "testpmd> ", 
20) - self.vm_dut.send_expect("start", "testpmd> ") + self.vm_sut.send_expect(vm_testpmd, "testpmd> ", 20) + self.vm_sut.send_expect("set fwd mac", "testpmd> ", 20) + self.vm_sut.send_expect("start", "testpmd> ") @property def check_value(self): @@ -254,24 +254,24 @@ class TestVhostPVPDiffQemuVersion(TestCase): self.dst1, payload, ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s/pvp_diff_qemu_version.pcap", %s)' % (self.out_path, flow) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() tgenInput = [] - port = self.tester.get_local_port(self.pf) + port = self.tg_node.get_local_port(self.pf) tgenInput.append( (port, port, "%s/pvp_diff_qemu_version.pcap" % self.out_path) ) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 5, "duration": 20} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) Mpps = pps / 1000000.0 @@ -306,11 +306,11 @@ class TestVhostPVPDiffQemuVersion(TestCase): stop testpmd in vhost and qemu close the qemu """ - self.vm_dut.send_expect("quit", "#", 20) + self.vm_sut.send_expect("quit", "#", 20) self.vhost.send_expect("quit", "#", 20) self.vm.stop() - self.dut.send_expect("killall -I %s" % self.testpmd_name, "#", 20) - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -I %s" % self.testpmd_name, "#", 20) + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") def test_perf_vhost_pvp_diffrent_qemu_version_mergeable_mac(self): """ @@ -351,9 +351,9 @@ class TestVhostPVPDiffQemuVersion(TestCase): Run after each test case. 
Clear qemu and testpmd to avoid blocking the following TCs """ - self.dut.close_session(self.vhost) - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.close_session(self.vhost) + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") time.sleep(2) def tear_down_all(self): diff --git a/tests/TestSuite_pvp_multi_paths_performance.py b/tests/TestSuite_pvp_multi_paths_performance.py index 99bfbf1f..f960ce6d 100644 --- a/tests/TestSuite_pvp_multi_paths_performance.py +++ b/tests/TestSuite_pvp_multi_paths_performance.py @@ -12,11 +12,11 @@ from copy import deepcopy import framework.rst as rst import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import UPDATE_EXPECTED, load_global_setting from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestPVPMultiPathPerformance(TestCase): @@ -27,25 +27,25 @@ class TestPVPMultiPathPerformance(TestCase): self.frame_sizes = [64, 128, 256, 512, 1024, 1518] self.core_config = "1S/4C/1T" self.number_of_ports = 1 - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.core_list_user = self.core_list[0:2] self.core_list_host = self.core_list[2:4] self.out_path = "/tmp" - out = 
self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user0 = self.dut.new_session(suite="virtio-user0") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) + self.pktgen_helper = TrafficGeneratorStream() + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user0 = self.sut_node.new_session(suite="virtio-user0") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user0_pmd = PmdOutput(self.sut_node, self.virtio_user0) self.save_result_flag = True self.json_obj = {} @@ -75,12 +75,12 @@ class TestPVPMultiPathPerformance(TestCase): self.gap = self.get_suite_cfg()["accepted_tolerance"] self.test_result = {} self.nb_desc = self.test_parameters[64][0] - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] def send_and_verify(self, case_info): """ - Send packet with packet generator and verify + Send packet with traffic generator and verify """ for frame_size in self.frame_sizes: tgen_input = [] @@ -90,21 +90,21 @@ class TestPVPMultiPathPerformance(TestCase): "Test running at parameters: " + "framesize: {}, rxd/txd: {}".format(frame_size, self.nb_desc) ) - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - destination_mac = self.dut.get_mac_address(self.dut_ports[0]) - pkt = Packet(pkt_type="UDP", pkt_len=frame_size) - pkt.config_layer("ether", {"dst": "%s" % destination_mac}) - 
pkt.save_pcapfile(self.tester, "%s/multi_path.pcap" % (self.out_path)) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + destination_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=frame_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % destination_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/multi_path.pcap" % (self.out_path)) tgen_input.append( (tx_port, rx_port, "%s/multi_path.pcap" % (self.out_path)) ) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) Mpps = pps / 1000000.0 self.throughput[frame_size][self.nb_desc] = Mpps linerate = ( @@ -121,7 +121,7 @@ class TestPVPMultiPathPerformance(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -130,12 +130,12 @@ class TestPVPMultiPathPerformance(TestCase): """ start testpmd on vhost """ - self.dut.send_expect("rm -rf ./vhost.out", "#") - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf ./vhost.out", "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") vdevs = ["net_vhost0,iface=vhost-net,queues=1,client=0"] - ports = 
[self.dut.ports_info[self.dut_ports[0]]["pci"]] + ports = [self.sut_node.ports_info[self.sut_ports[0]]["pci"]] param = "--nb-cores=1 --txd=%d --rxd=%d" % (self.nb_desc, self.nb_desc) self.vhost_user_pmd.start_testpmd( cores=self.core_list_host, @@ -294,8 +294,8 @@ class TestPVPMultiPathPerformance(TestCase): """ close all session of vhost an virtio """ - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user0) def test_perf_pvp_virtio11_mergeable(self): """ @@ -511,7 +511,7 @@ class TestPVPMultiPathPerformance(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py b/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py index b3bcc4cb..ab38207d 100644 --- a/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py +++ b/tests/TestSuite_pvp_multi_paths_vhost_single_core_performance.py @@ -13,11 +13,11 @@ from copy import deepcopy import framework.rst as rst import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import UPDATE_EXPECTED, load_global_setting from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestPVPMultiPathVhostPerformance(TestCase): @@ -28,27 +28,27 @@ class TestPVPMultiPathVhostPerformance(TestCase): self.frame_sizes = [64, 128, 256, 512, 1024, 1518] self.core_config = "1S/5C/1T" self.number_of_ports = 1 - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = 
self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.core_list_user = self.core_list[0:3] self.core_list_host = self.core_list[3:5] self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user0 = self.dut.new_session(suite="virtio-user0") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) + self.pktgen_helper = TrafficGeneratorStream() + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user0 = self.sut_node.new_session(suite="virtio-user0") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user0_pmd = PmdOutput(self.sut_node, self.virtio_user0) self.save_result_flag = True self.json_obj = {} - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] def set_up(self): @@ -80,7 +80,7 @@ class TestPVPMultiPathVhostPerformance(TestCase): def send_and_verify(self, case_info): """ - Send packet with packet generator and verify + Send packet with traffic generator and verify """ for frame_size in self.frame_sizes: tgen_input = [] @@ -89,21 +89,21 @@ class TestPVPMultiPathVhostPerformance(TestCase): "Test running at parameters: " + "framesize: {}, rxd/txd: {}".format(frame_size, self.nb_desc) ) - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - destination_mac = 
self.dut.get_mac_address(self.dut_ports[0]) - pkt = Packet(pkt_type="UDP", pkt_len=frame_size) - pkt.config_layer("ether", {"dst": "%s" % destination_mac}) - pkt.save_pcapfile(self.tester, "%s/multi_path.pcap" % (self.out_path)) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + destination_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=frame_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % destination_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/multi_path.pcap" % (self.out_path)) tgen_input.append( (tx_port, rx_port, "%s/multi_path.pcap" % (self.out_path)) ) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) Mpps = pps / 1000000.0 linerate = ( Mpps @@ -119,7 +119,7 @@ class TestPVPMultiPathVhostPerformance(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -129,10 +129,10 @@ class TestPVPMultiPathVhostPerformance(TestCase): start testpmd on vhost """ # Clean the execution ENV - self.dut.send_expect("rm -rf ./vhost.out", "#") - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf ./vhost.out", "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall 
-s INT qemu-system-x86_64", "#") vdevs = ["net_vhost0,iface=vhost-net,queues=1"] param = "--nb-cores=1 --txd=%d --rxd=%d" % (self.nb_desc, self.nb_desc) self.vhost_user_pmd.start_testpmd( @@ -292,8 +292,8 @@ class TestPVPMultiPathVhostPerformance(TestCase): """ close all session of vhost and vhost-user """ - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user0) def test_perf_vhost_single_core_virtio11_mergeable(self): """ @@ -509,7 +509,7 @@ class TestPVPMultiPathVhostPerformance(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py b/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py index 6972f569..30e3a6bc 100644 --- a/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py +++ b/tests/TestSuite_pvp_multi_paths_virtio_single_core_performance.py @@ -13,11 +13,11 @@ from copy import deepcopy import framework.rst as rst import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import UPDATE_EXPECTED, load_global_setting from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestPVPMultiPathVirtioPerformance(TestCase): @@ -28,28 +28,28 @@ class TestPVPMultiPathVirtioPerformance(TestCase): self.frame_sizes = [64, 128, 256, 512, 1024, 1518] self.core_config = "1S/5C/1T" self.number_of_ports = 1 - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.sut_ports = 
self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.core_list_user = self.core_list[0:2] self.core_list_host = self.core_list[2:5] self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user0 = self.dut.new_session(suite="virtio-user0") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) + self.pktgen_helper = TrafficGeneratorStream() + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user0 = self.sut_node.new_session(suite="virtio-user0") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user0_pmd = PmdOutput(self.sut_node, self.virtio_user0) self.save_result_flag = True self.json_obj = {} - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] def set_up(self): @@ -81,7 +81,7 @@ class TestPVPMultiPathVirtioPerformance(TestCase): def send_and_verify(self, case_info): """ - Send packet with packet generator and verify + Send packet with traffic generator and verify """ for frame_size in self.frame_sizes: tgen_input = [] @@ -90,23 +90,23 @@ class TestPVPMultiPathVirtioPerformance(TestCase): "Test running at parameters: " + "framesize: {}, rxd/txd: {}".format(frame_size, self.nb_desc) ) - rx_port = 
self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - destination_mac = self.dut.get_mac_address(self.dut_ports[0]) - pkt = Packet(pkt_type="UDP", pkt_len=frame_size) - pkt.config_layer("ether", {"dst": "%s" % destination_mac}) - pkt.save_pcapfile(self.tester, "%s/multi_path.pcap" % (self.out_path)) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + destination_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=frame_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % destination_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/multi_path.pcap" % (self.out_path)) tgen_input.append( (tx_port, rx_port, "%s/multi_path.pcap" % (self.out_path)) ) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 5} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) Mpps = pps / 1000000.0 @@ -125,7 +125,7 @@ class TestPVPMultiPathVirtioPerformance(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -134,10 +134,10 @@ class TestPVPMultiPathVirtioPerformance(TestCase): """ start testpmd on vhost """ - self.dut.send_expect("rm -rf ./vhost.out", "#") - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf ./vhost.out", "#") + 
self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") vdevs = ["net_vhost0,iface=vhost-net,queues=1,client=0"] param = "--nb-cores=2 --txd=%d --rxd=%d" % (self.nb_desc, self.nb_desc) self.vhost_user_pmd.start_testpmd( @@ -297,8 +297,8 @@ class TestPVPMultiPathVirtioPerformance(TestCase): """ close all session of vhost and vhost-user """ - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user0) def test_perf_virtio_single_core_virtio11_mergeable(self): """ @@ -514,7 +514,7 @@ class TestPVPMultiPathVirtioPerformance(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_pvp_qemu_multi_paths_port_restart.py b/tests/TestSuite_pvp_qemu_multi_paths_port_restart.py index 2b753eb1..b47c4764 100644 --- a/tests/TestSuite_pvp_qemu_multi_paths_port_restart.py +++ b/tests/TestSuite_pvp_qemu_multi_paths_port_restart.py @@ -13,9 +13,9 @@ import re import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM @@ -26,26 +26,26 @@ class TestPVPQemuMultiPathPortRestart(TestCase): """ self.frame_sizes = [64, 128, 256, 512, 1024, 1280, 1518] self.core_config = "1S/3C/1T" - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") # get core mask - self.ports_socket = 
self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.vm_dut = None + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.vm_sut = None self.virtio1_mac = "52:54:00:00:00:01" self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.pci_info = self.dut.ports_info[0]["pci"] + self.pktgen_helper = TrafficGeneratorStream() + self.pci_info = self.sut_node.ports_info[0]["pci"] self.number_of_ports = 1 - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] def set_up(self): @@ -53,9 +53,9 @@ class TestPVPQemuMultiPathPortRestart(TestCase): Run before each test case. 
""" # Clean the execution ENV - self.dut.send_expect("rm -rf ./vhost.out", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf ./vhost.out", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") # Prepare the result table self.table_header = [ "FrameSize(B)", @@ -66,16 +66,16 @@ class TestPVPQemuMultiPathPortRestart(TestCase): ] self.result_table_create(self.table_header) - self.vhost = self.dut.new_session(suite="vhost-user") + self.vhost = self.sut_node.new_session(suite="vhost-user") def start_vhost_testpmd(self): """ start testpmd on vhost """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") vdev = [r"'net_vhost0,iface=vhost-net,queues=1'"] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, prefix="vhost", ports=[self.pci_info], vdevs=vdev ) para = " -- -i --nb-cores=1 --txd=1024 --rxd=1024" @@ -103,15 +103,15 @@ class TestPVPQemuMultiPathPortRestart(TestCase): command = ( self.path + "-c 0x3 -n 3 -- -i " + "--nb-cores=1 --txd=1024 --rxd=1024" ) - self.vm_dut.send_expect(command, "testpmd> ", 30) - self.vm_dut.send_expect("set fwd mac", "testpmd> ", 30) - self.vm_dut.send_expect("start", "testpmd> ", 30) + self.vm_sut.send_expect(command, "testpmd> ", 30) + self.vm_sut.send_expect("set fwd mac", "testpmd> ", 30) + self.vm_sut.send_expect("start", "testpmd> ", 30) def start_one_vm(self, modem=0, mergeable=0): """ start qemu """ - self.vm = VM(self.dut, "vm0", "vhost_sample") + self.vm = VM(self.sut_node, "vm0", "vhost_sample") vm_params = {} vm_params["driver"] = "vhost-user" 
vm_params["opt_path"] = "./vhost-net" @@ -135,8 +135,8 @@ class TestPVPQemuMultiPathPortRestart(TestCase): self.vm.set_vm_device(**vm_params) try: - self.vm_dut = self.vm.start() - if self.vm_dut is None: + self.vm_sut = self.vm.start() + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -211,20 +211,20 @@ class TestPVPQemuMultiPathPortRestart(TestCase): """ start to send packet and get the throughput """ - pkt = Packet(pkt_type="IP_RAW", pkt_len=frame_size) - pkt.config_layer("ether", {"dst": "%s" % self.dst_mac}) - pkt.save_pcapfile(self.tester, "%s/pvp_multipath.pcap" % (self.out_path)) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IP_RAW", pkt_len=frame_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % self.dst_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/pvp_multipath.pcap" % (self.out_path)) tgenInput = [] - port = self.tester.get_local_port(self.dut_ports[0]) + port = self.tg_node.get_local_port(self.sut_ports[0]) tgenInput.append((port, port, "%s/pvp_multipath.pcap" % self.out_path)) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 5} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) Mpps = pps / 1000000.0 @@ -263,13 +263,13 @@ class TestPVPQemuMultiPathPortRestart(TestCase): close testpmd about vhost-user and vm_testpmd """ self.vhost.send_expect("quit", "#", 60) - self.vm_dut.send_expect("quit", "#", 60) + self.vm_sut.send_expect("quit", "#", 60) def close_session(self): """ close session of vhost-user """ - self.dut.close_session(self.vhost) + self.sut_node.close_session(self.vhost) def test_perf_pvp_qemu_mergeable_mac(self): 
""" @@ -370,8 +370,8 @@ class TestPVPQemuMultiPathPortRestart(TestCase): """ Run after each test case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") self.close_session() time.sleep(2) diff --git a/tests/TestSuite_pvp_share_lib.py b/tests/TestSuite_pvp_share_lib.py index ee3ae39a..0fcd9820 100644 --- a/tests/TestSuite_pvp_share_lib.py +++ b/tests/TestSuite_pvp_share_lib.py @@ -8,9 +8,9 @@ The feature need compile dpdk as shared libraries. """ import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestPVPShareLib(TestCase): @@ -18,11 +18,11 @@ class TestPVPShareLib(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.core_config = "1S/4C/1T" - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.verify( @@ -32,35 +32,35 @@ class TestPVPShareLib(TestCase): self.core_list_virtio_user = self.core_list[0:2] self.core_list_vhost_user = self.core_list[2:4] - self.mem_channels = self.dut.get_memory_channels() - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.mem_channels = self.sut_node.get_memory_channels() + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.prepare_share_lib_env() self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] def set_up(self): """ Run before each test case. 
""" - self.dut.send_expect("rm -rf ./vhost-net*", "# ") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user = self.dut.new_session(suite="virtio-user") + self.sut_node.send_expect("rm -rf ./vhost-net*", "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") self.vhost_user.send_expect( "export LD_LIBRARY_PATH=%s/%s/drivers:$LD_LIBRARY_PATH" - % (self.dut.base_dir, self.dut.target), + % (self.sut_node.base_dir, self.sut_node.target), "# ", ) self.virtio_user.send_expect( "export LD_LIBRARY_PATH=%s/%s/drivers:$LD_LIBRARY_PATH" - % (self.dut.base_dir, self.dut.target), + % (self.sut_node.base_dir, self.sut_node.target), "# ", ) # Prepare the result table @@ -72,33 +72,33 @@ class TestPVPShareLib(TestCase): self.result_table_create(self.table_header) def prepare_share_lib_env(self): - self.dut.build_install_dpdk( - self.dut.target, extra_options="-Dc_args=-DRTE_BUILD_SHARED_LIB" + self.sut_node.build_install_dpdk( + self.sut_node.target, extra_options="-Dc_args=-DRTE_BUILD_SHARED_LIB" ) def restore_env(self): - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.build_install_dpdk(self.sut_node.target) def send_and_verify(self): """ - Send packet with packet generator and verify + Send packet with traffic generator and verify """ payload_size = 64 - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - HEADER_SIZE["tcp"] tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.tester.scapy_append( + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_node.scapy_append( 'wrpcap("%s/vhost.pcap", [Ether(dst="%s")/IP()/TCP()/("X"*%d)])' % (self.out_path, self.dst_mac, 
payload_size) ) tgen_input.append((tx_port, rx_port, "%s/vhost.pcap" % self.out_path)) - self.tester.scapy_execute() - self.tester.pktgen.clear_streams() + self.tg_node.scapy_execute() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, Pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, Pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) self.verify(Pps > 0, "%s can not receive packets" % (self.running_case)) Pps /= 1e6 Pct = (Pps * 100) / self.wirespeed(self.nic, 64, 1) @@ -112,7 +112,7 @@ class TestPVPShareLib(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -121,8 +121,8 @@ class TestPVPShareLib(TestCase): """ start testpmd on vhost """ - self.pci_info = self.dut.ports_info[0]["pci"] - eal_param = self.dut.create_eal_parameters( + self.pci_info = self.sut_node.ports_info[0]["pci"] + eal_param = self.sut_node.create_eal_parameters( socket=self.ports_socket, cores=self.core_list_vhost_user, prefix="vhost", @@ -143,7 +143,7 @@ class TestPVPShareLib(TestCase): """ start testpmd on virtio """ - eal_param = self.dut.create_eal_parameters( + eal_param = self.sut_node.create_eal_parameters( socket=self.ports_socket, cores=self.core_list_virtio_user, prefix="virtio-user", @@ -163,8 +163,8 @@ class TestPVPShareLib(TestCase): """ self.virtio_user.send_expect("quit", "# ", 60) self.vhost_user.send_expect("quit", "# ", 60) - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user) def test_perf_pvp_share_lib_of_niantic(self): """ @@ -198,7 +198,7 @@ class TestPVPShareLib(TestCase): """ Run after each test case. 
""" - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") def tear_down_all(self): """ diff --git a/tests/TestSuite_pvp_vhost_user_reconnect.py b/tests/TestSuite_pvp_vhost_user_reconnect.py index feb91bd4..7747239b 100644 --- a/tests/TestSuite_pvp_vhost_user_reconnect.py +++ b/tests/TestSuite_pvp_vhost_user_reconnect.py @@ -13,9 +13,9 @@ import re import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM @@ -23,18 +23,18 @@ class TestPVPVhostUserReconnect(TestCase): def set_up_all(self): # Get and verify the ports - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") # Get the port's socket - self.pf = self.dut_ports[0] - netdev = self.dut.ports_info[self.pf]["port"] - self.pci_info = self.dut.ports_info[0]["pci"] + self.pf = self.sut_ports[0] + netdev = self.sut_node.ports_info[self.pf]["port"] + self.pci_info = self.sut_node.ports_info[0]["pci"] self.socket = netdev.get_nic_socket() - self.cores = self.dut.get_core_list("1S/2C/1T", socket=self.socket) - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.cores = self.sut_node.get_core_list("1S/2C/1T", socket=self.socket) + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) # set diff arg about mem_socket base on socket number - if len(set([int(core["socket"]) for core in self.dut.cores])) == 1: + if len(set([int(core["socket"]) for core in self.sut_node.cores])) == 1: self.socket_mem = "1024" else: self.socket_mem = "1024,1024" @@ -49,12 +49,12 @@ class 
TestPVPVhostUserReconnect(TestCase): self.checked_vm = False self.out_path = "/tmp/%s" % self.suite_name - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] def set_up(self): @@ -62,10 +62,10 @@ class TestPVPVhostUserReconnect(TestCase): run before each test case. clear the execution ENV """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") - self.dut.send_expect("rm -rf ./vhost-net*", "# ") - self.vhost_user = self.dut.new_session(suite="vhost-user") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.sut_node.send_expect("rm -rf ./vhost-net*", "# ") + self.vhost_user = self.sut_node.new_session(suite="vhost-user") def launch_testpmd_as_vhost_user(self): """ @@ -77,8 +77,8 @@ class TestPVPVhostUserReconnect(TestCase): i, i, ) - testcmd = self.dut.base_dir + "/%s" % self.path - eal_params = self.dut.create_eal_parameters( + testcmd = self.sut_node.base_dir + "/%s" % self.path + eal_params = self.sut_node.create_eal_parameters( cores=self.cores, prefix="vhost", ports=[self.pci_info] ) para = " -- -i --port-topology=chained --nb-cores=1 --txd=1024 --rxd=1024" @@ -97,8 +97,8 @@ class TestPVPVhostUserReconnect(TestCase): i, i, ) - testcmd = self.dut.base_dir + "/%s" % self.path - eal_params = self.dut.create_eal_parameters( + testcmd = self.sut_node.base_dir + "/%s" 
% self.path + eal_params = self.sut_node.create_eal_parameters( cores=self.cores, no_pci=True, prefix="vhost" ) para = " -- -i --nb-cores=1 --txd=1024 --rxd=1024" @@ -106,13 +106,13 @@ class TestPVPVhostUserReconnect(TestCase): self.vhost_user.send_expect(self.vhostapp_testcmd, "testpmd> ", 40) self.vhost_user.send_expect("start", "testpmd> ", 40) - def check_link_status_after_testpmd_start(self, dut_info): + def check_link_status_after_testpmd_start(self, sut_info): """ check the link status is up after testpmd start """ loop = 1 while loop <= 5: - out = dut_info.send_expect("show port info all", "testpmd> ", 120) + out = sut_info.send_expect("show port info all", "testpmd> ", 120) port_status = re.findall("Link\s*status:\s*([a-z]*)", out) if "down" not in port_status: break @@ -134,7 +134,7 @@ class TestPVPVhostUserReconnect(TestCase): if list(vm_config.params[i].keys())[0] == "qemu": self.vm_qemu_version = vm_config.params[i]["qemu"][0]["path"] - out = self.dut.send_expect("%s --version" % self.vm_qemu_version, "#") + out = self.sut_node.send_expect("%s --version" % self.vm_qemu_version, "#") result = re.search("QEMU\s*emulator\s*version\s*(\d*.\d*)", out) self.verify( result is not None, @@ -154,13 +154,13 @@ class TestPVPVhostUserReconnect(TestCase): """ start two VM """ - self.vm_dut = [] + self.vm_sut = [] self.vm = [] setting_args = "mrg_rxbuf=on,rx_queue_size=1024,tx_queue_size=1024" if packed is True: setting_args = "%s,packed=on" % setting_args for i in range(self.vm_num): - vm_info = VM(self.dut, "vm%d" % i, "vhost_sample_copy") + vm_info = VM(self.sut_node, "vm%d" % i, "vhost_sample_copy") vm_params = {} vm_params["driver"] = "vhost-user" vm_params["opt_path"] = "./vhost-net%d" % (i) @@ -171,14 +171,14 @@ class TestPVPVhostUserReconnect(TestCase): self.check_qemu_version(vm_info) try: - vm_dut = None - vm_dut = vm_info.start(bind_dev=bind_dev) - if vm_dut is None: + vm_sut = None + vm_sut = vm_info.start(bind_dev=bind_dev) + if vm_sut is None: raise 
Exception("Set up VM ENV failed") except Exception as e: print(utils.RED("Failure for %s" % str(e))) - self.verify(vm_dut is not None, "start vm failed") - self.vm_dut.append(vm_dut) + self.verify(vm_sut is not None, "start vm failed") + self.vm_sut.append(vm_sut) self.vm.append(vm_info) def vm_testpmd_start(self): @@ -190,10 +190,10 @@ class TestPVPVhostUserReconnect(TestCase): + " -c 0x3 -n 4 " + "-- -i --port-topology=chained --txd=1024 --rxd=1024 " ) - for i in range(len(self.vm_dut)): - self.vm_dut[i].send_expect(vm_testpmd, "testpmd> ", 20) - self.vm_dut[i].send_expect("set fwd mac", "testpmd> ") - self.vm_dut[i].send_expect("start", "testpmd> ") + for i in range(len(self.vm_sut)): + self.vm_sut[i].send_expect(vm_testpmd, "testpmd> ", 20) + self.vm_sut[i].send_expect("set fwd mac", "testpmd> ") + self.vm_sut[i].send_expect("start", "testpmd> ") self.check_link_status_after_testpmd_start(self.vhost_user) @@ -201,9 +201,9 @@ class TestPVPVhostUserReconnect(TestCase): """ quit the testpmd in vm and stop all apps """ - for i in range(len(self.vm_dut)): - self.vm_dut[i].send_expect("stop", "testpmd> ", 20) - self.vm_dut[i].send_expect("quit", "# ", 20) + for i in range(len(self.vm_sut)): + self.vm_sut[i].send_expect("stop", "testpmd> ", 20) + self.vm_sut[i].send_expect("quit", "# ", 20) self.vm[i].stop() self.vhost_user.send_expect("quit", "# ", 20) @@ -211,17 +211,17 @@ class TestPVPVhostUserReconnect(TestCase): """ restore vm interfaces and config intf arp """ - for i in range(len(self.vm_dut)): - vm_intf = self.vm_dut[i].ports_info[0]["intf"] - self.vm_dut[i].send_expect( + for i in range(len(self.vm_sut)): + vm_intf = self.vm_sut[i].ports_info[0]["intf"] + self.vm_sut[i].send_expect( "ifconfig %s %s" % (vm_intf, self.virtio_ip[i]), "#", 10 ) - self.vm_dut[i].send_expect("ifconfig %s up" % vm_intf, "#", 10) + self.vm_sut[i].send_expect("ifconfig %s up" % vm_intf, "#", 10) - self.vm_dut[0].send_expect( + self.vm_sut[0].send_expect( "arp -s %s %s" % 
(self.virtio_ip[1], self.virtio_mac[1]), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "arp -s %s %s" % (self.virtio_ip[0], self.virtio_mac[0]), "#", 10 ) @@ -229,10 +229,10 @@ class TestPVPVhostUserReconnect(TestCase): """ start iperf """ - self.vm_dut[0].send_expect( + self.vm_sut[0].send_expect( "iperf -s -p 12345 -i 1 > iperf_server.log &", "", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "iperf -c %s -p 12345 -i 1 -t 10 > iperf_client.log &" % self.virtio_ip[0], "", 60, @@ -244,7 +244,7 @@ class TestPVPVhostUserReconnect(TestCase): verify the Iperf test result """ # copy iperf_client file from vm1 - self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir) + self.vm_sut[1].session.copy_file_from("%s/iperf_client.log" % self.sut_node.base_dir) fp = open("./iperf_client.log") fmsg = fp.read() fp.close() @@ -263,27 +263,27 @@ class TestPVPVhostUserReconnect(TestCase): def send_and_verify(self, cycle=0, tinfo=""): frame_data = dict().fromkeys(self.frame_sizes, 0) for frame_size in self.frame_sizes: - pkt = Packet(pkt_type="UDP", pkt_len=frame_size) - pkt.config_layers( + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=frame_size) + scapy_pkt_builder.config_layers( [ ("ether", {"dst": "%s" % self.dst_mac}), ("ipv4", {"dst": "%s" % self.dst1, "src": "%s" % self.src1}), ] ) - pkt.save_pcapfile(self.tester, "%s/reconnect.pcap" % self.out_path) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/reconnect.pcap" % self.out_path) tgenInput = [] - port = self.tester.get_local_port(self.pf) + port = self.tg_node.get_local_port(self.pf) tgenInput.append((port, port, "%s/reconnect.pcap" % self.out_path)) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) traffic_opt = { "delay": 30, } - _, pps = 
self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) Mpps = pps / 1000000.0 @@ -359,7 +359,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from vhost self.logger.info("now reconnect from vhost") for i in range(self.reconnect_times): - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from vhost") self.check_reconnect_perf() @@ -367,7 +367,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from qemu self.logger.info("now reconnect from vm") for i in range(self.reconnect_times): - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms() self.vm_testpmd_start() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from VM") @@ -398,7 +398,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from vhost self.logger.info("now reconnect from vhost") for i in range(self.reconnect_times): - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from vhost") self.check_reconnect_perf() @@ -406,7 +406,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from qemu self.logger.info("now reconnect from vm") for i in range(self.reconnect_times): - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms() self.vm_testpmd_start() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from VM") @@ -434,7 +434,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from vhost self.logger.info("now 
reconnect from vhost") for i in range(self.reconnect_times): - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user_with_no_pci() self.start_iperf() self.reconnect_data = self.iperf_result_verify( @@ -451,9 +451,9 @@ class TestPVPVhostUserReconnect(TestCase): self.logger.info("now reconnect from vm") vm_tmp = list() for i in range(self.reconnect_times): - self.vm_dut[0].send_expect("rm iperf_server.log", "# ", 10) - self.vm_dut[1].send_expect("rm iperf_client.log", "# ", 10) - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.vm_sut[0].send_expect("rm iperf_server.log", "# ", 10) + self.vm_sut[1].send_expect("rm iperf_client.log", "# ", 10) + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms(bind_dev=False) self.config_vm_intf() self.start_iperf() @@ -490,7 +490,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from vhost self.logger.info("now reconnect from vhost") for i in range(self.reconnect_times): - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from vhost") self.check_reconnect_perf() @@ -498,7 +498,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from qemu self.logger.info("now reconnect from vm") for i in range(self.reconnect_times): - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms(packed=True) self.vm_testpmd_start() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from VM") @@ -529,14 +529,14 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from vhost self.logger.info("now reconnect from vhost") for i in range(self.reconnect_times): - 
self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from vhost") self.check_reconnect_perf() # reconnet from qemu self.logger.info("now reconnect from vm") for i in range(self.reconnect_times): - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms(packed=True) self.vm_testpmd_start() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from VM") @@ -564,7 +564,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from vhost self.logger.info("now reconnect from vhost") for i in range(self.reconnect_times): - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user_with_no_pci() self.start_iperf() self.reconnect_data = self.iperf_result_verify( @@ -580,9 +580,9 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from VM self.logger.info("now reconnect from vm") for i in range(self.reconnect_times): - self.vm_dut[0].send_expect("rm iperf_server.log", "# ", 10) - self.vm_dut[1].send_expect("rm iperf_client.log", "# ", 10) - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.vm_sut[0].send_expect("rm iperf_server.log", "# ", 10) + self.vm_sut[1].send_expect("rm iperf_client.log", "# ", 10) + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms(packed=True, bind_dev=False) self.config_vm_intf() self.start_iperf() @@ -604,13 +604,13 @@ class TestPVPVhostUserReconnect(TestCase): except Exception as e: self.logger.warning(e) finally: - self.dut.kill_all() - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.sut_node.kill_all() + self.sut_node.send_expect("killall -s INT 
qemu-system-x86_64", "# ") time.sleep(2) def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() pass diff --git a/tests/TestSuite_pvp_virtio_bonding.py b/tests/TestSuite_pvp_virtio_bonding.py index c7115ff0..166800bd 100644 --- a/tests/TestSuite_pvp_virtio_bonding.py +++ b/tests/TestSuite_pvp_virtio_bonding.py @@ -11,10 +11,10 @@ import re import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM @@ -23,37 +23,37 @@ class TestPVPVirtIOBonding(TestCase): # Get and verify the ports self.core_config = "1S/5C/1T" self.queues = 4 - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.cores_num = len( - [n for n in self.dut.cores if int(n["socket"]) == self.ports_socket] + [n for n in self.sut_node.cores if int(n["socket"]) == self.ports_socket] ) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.verify( self.cores_num >= 5, "There has not enought cores to test this suite %s" % self.suite_name, ) - cores = self.dut.get_core_list(self.core_config, socket=self.ports_socket) + cores = self.sut_node.get_core_list(self.core_config, socket=self.ports_socket) self.coremask = utils.create_mask(cores) - self.memory_channel = self.dut.get_memory_channels() - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.memory_channel = self.sut_node.get_memory_channels() + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.out_path = 
"/tmp/%s" % self.suite_name - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] def set_up(self): """ run before each test case. """ - self.dut.send_expect("rm -rf ./vhost.out", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf ./vhost.out", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") self.start_testpmd_on_vhost() self.start_one_vm() @@ -69,8 +69,8 @@ class TestPVPVirtIOBonding(TestCase): ) params = "--port-topology=chained --nb-cores=4 --txd=1024 --rxd=1024" eal_param = "--file-prefix=vhost %s " % vdev_info - self.vhost_testpmd = PmdOutput(self.dut) - self.pci_info = self.dut.ports_info[0]["pci"] + self.vhost_testpmd = PmdOutput(self.sut_node) + self.pci_info = self.sut_node.ports_info[0]["pci"] self.vhost_testpmd.start_testpmd( self.core_config, params, eal_param=eal_param, ports=[self.pci_info] ) @@ -81,7 +81,7 @@ class TestPVPVirtIOBonding(TestCase): """ launch testpmd in VM """ - self.vm_testpmd = PmdOutput(self.vm_dut) + self.vm_testpmd = PmdOutput(self.vm_sut) self.vm_testpmd.start_testpmd("all", "--port-topology=chained --nb-cores=5") def create_bonded_device_in_vm(self, mode): @@ -146,19 +146,19 @@ class TestPVPVirtIOBonding(TestCase): start traffic and verify data stats on 
vhost and vm """ tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": "%s" % self.dst_mac}) - pkt.save_pcapfile(self.tester, "%s/bonding.pcap" % self.out_path) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % self.dst_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/bonding.pcap" % self.out_path) tgen_input.append((tx_port, rx_port, "%s/bonding.pcap" % self.out_path)) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 10} - _, _ = self.tester.pktgen.measure_throughput( + _, _ = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) @@ -176,7 +176,7 @@ class TestPVPVirtIOBonding(TestCase): bootup one vm with four virtio-net devices """ virtio_mac = "52:54:00:00:00:0" - self.vm = VM(self.dut, "vm0", "vhost_sample") + self.vm = VM(self.sut_node, "vm0", "vhost_sample") self.vm.load_config() vm_params = {} for i in range(self.queues): @@ -189,8 +189,8 @@ class TestPVPVirtIOBonding(TestCase): try: # Due to we have change the params info before, # so need to start vm with load_config=False - self.vm_dut = self.vm.start(load_config=False) - if self.vm_dut is None: + self.vm_sut = self.vm.start(load_config=False) + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -225,8 +225,8 @@ class TestPVPVirtIOBonding(TestCase): Run after each test case. 
""" self.stop_testpmd_and_vm() - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") time.sleep(2) def tear_down_all(self): diff --git a/tests/TestSuite_pvp_virtio_user_2M_hugepages.py b/tests/TestSuite_pvp_virtio_user_2M_hugepages.py index 7fe0bcbb..a863f064 100644 --- a/tests/TestSuite_pvp_virtio_user_2M_hugepages.py +++ b/tests/TestSuite_pvp_virtio_user_2M_hugepages.py @@ -10,9 +10,9 @@ vhost/virtio-user pvp with 2M hugepage. import time import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestPVPVirtioWith2Mhuge(TestCase): @@ -20,19 +20,19 @@ class TestPVPVirtioWith2Mhuge(TestCase): """ Run at the start of each test suite. 
""" - hugepages_size = self.dut.send_expect( + hugepages_size = self.sut_node.send_expect( "awk '/Hugepagesize/ {print $2}' /proc/meminfo", "# " ) self.verify( int(hugepages_size) == 2048, "Please config you hugepages_size to 2048" ) self.core_config = "1S/4C/1T" - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list( + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.verify( len(self.core_list) >= 4, "There has not enought cores to test this suite %s" % self.suite_name, @@ -40,7 +40,7 @@ class TestPVPVirtioWith2Mhuge(TestCase): self.core_list_virtio_user = self.core_list[0:2] self.core_list_vhost_user = self.core_list[2:4] - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.header_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["tcp"] self.frame_sizes = [64, 128, 256, 512, 1024, 1518] self.logger.info( @@ -50,24 +50,24 @@ class TestPVPVirtioWith2Mhuge(TestCase): if "packet_sizes" in self.get_suite_cfg(): self.frame_sizes = self.get_suite_cfg()["packet_sizes"] self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.pci_info = self.dut.ports_info[0]["pci"] + self.pktgen_helper = TrafficGeneratorStream() + self.pci_info = 
self.sut_node.ports_info[0]["pci"] self.number_of_ports = 1 - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] def set_up(self): """ Run before each test case. """ - self.dut.send_expect("rm -rf ./vhost-net*", "# ") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user = self.dut.new_session(suite="virtio-user") + self.sut_node.send_expect("rm -rf ./vhost-net*", "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") # Prepare the result table self.table_header = ["Frame"] self.table_header.append("Mode") @@ -95,25 +95,25 @@ class TestPVPVirtioWith2Mhuge(TestCase): def send_and_verify(self): """ - Send packet with packet generator and verify + Send packet with traffic generator and verify """ for frame_size in self.frame_sizes: payload_size = frame_size - self.header_size tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.tester.scapy_append( + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_node.scapy_append( 'wrpcap("%s/vhost.pcap", [Ether(dst="%s")/IP()/TCP()/("X"*%d)])' % (self.out_path, self.dst_mac, payload_size) ) tgen_input.append((tx_port, rx_port, "%s/vhost.pcap" % self.out_path)) - self.tester.scapy_execute() - self.tester.pktgen.clear_streams() + self.tg_node.scapy_execute() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = 
self.tg_node.perf_tg.measure_throughput(stream_ids=streams) Mpps = pps / 1000000.0 self.verify( Mpps > self.check_value[frame_size], @@ -134,7 +134,7 @@ class TestPVPVirtioWith2Mhuge(TestCase): start testpmd on vhost """ vdev = ["net_vhost0,iface=vhost-net,queues=1"] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_vhost_user, prefix="vhost", ports=[self.pci_info], @@ -153,7 +153,7 @@ class TestPVPVirtioWith2Mhuge(TestCase): if not packed else "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,queues=1,packed_vq=1" ) - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_virtio_user, no_pci=True, prefix="virtio-user", @@ -170,8 +170,8 @@ class TestPVPVirtioWith2Mhuge(TestCase): """ self.virtio_user.send_expect("quit", "# ", 60) self.vhost_user.send_expect("quit", "# ", 60) - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user) def test_perf_pvp_virtio_user_split_ring_2M_hugepages(self): """ @@ -197,7 +197,7 @@ class TestPVPVirtioWith2Mhuge(TestCase): """ Run after each test case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") def tear_down_all(self): """ diff --git a/tests/TestSuite_pvp_virtio_user_4k_pages.py b/tests/TestSuite_pvp_virtio_user_4k_pages.py index 3e3e09b4..c216dc1b 100644 --- a/tests/TestSuite_pvp_virtio_user_4k_pages.py +++ b/tests/TestSuite_pvp_virtio_user_4k_pages.py @@ -10,9 +10,9 @@ vhost/virtio-user pvp with 4K pages. 
import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestPvpVirtioUser4kPages(TestCase): @@ -21,27 +21,27 @@ class TestPvpVirtioUser4kPages(TestCase): Run at the start of each test suite. """ self.core_config = "1S/4C/1T" - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores_num = len([n for n in self.dut.cores if int(n["socket"]) == 0]) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores_num = len([n for n in self.sut_node.cores if int(n["socket"]) == 0]) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.verify( self.cores_num >= 4, "There has not enought cores to test this suite %s" % self.suite_name, ) # for this suite, only support for vfio-pci - self.dut.send_expect("modprobe vfio-pci", "# ") - for i in self.dut_ports: - port = self.dut.ports_info[i]["port"] + self.sut_node.send_expect("modprobe vfio-pci", "# ") + for i in self.sut_ports: + port = self.sut_node.ports_info[i]["port"] port.bind_driver("vfio-pci") - self.core_list = self.dut.get_core_list( + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.core_list_virtio_user = self.core_list[0:2] self.core_list_vhost_user = self.core_list[2:4] - self.pci_info = self.dut.ports_info[0]["pci"] - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.pci_info = self.sut_node.ports_info[0]["pci"] + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.frame_sizes = [64, 128, 256, 512, 1024, 1518] self.logger.info( "You can config packet_size in file %s.cfg," % 
self.suite_name @@ -51,23 +51,23 @@ class TestPvpVirtioUser4kPages(TestCase): self.frame_sizes = self.get_suite_cfg()["packet_sizes"] self.out_path = "/tmp/%s" % self.suite_name - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() self.number_of_ports = 1 - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] def set_up(self): """ Run before each test case. """ - self.dut.send_expect("rm -rf ./vhost-net*", "# ") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "# ") - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user = self.dut.new_session(suite="virtio-user") + self.sut_node.send_expect("rm -rf ./vhost-net*", "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "# ") + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") # Prepare the result table self.table_header = ["Frame"] self.table_header.append("Mode") @@ -95,22 +95,22 @@ class TestPvpVirtioUser4kPages(TestCase): def send_and_verify(self): """ - Send packet with packet generator and verify + Send packet with traffic generator and verify """ for frame_size in self.frame_sizes: tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - pkt = Packet(pkt_type="UDP", pkt_len=frame_size) - pkt.config_layer("ether", {"dst": "%s" % self.dst_mac}) - pkt.save_pcapfile(self.tester, "%s/vhost.pcap" % 
self.out_path) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=frame_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % self.dst_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/vhost.pcap" % self.out_path) tgen_input.append((tx_port, rx_port, "%s/vhost.pcap" % self.out_path)) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) Mpps = pps / 1000000.0 self.verify( Mpps > self.check_value[frame_size], @@ -133,7 +133,7 @@ class TestPvpVirtioUser4kPages(TestCase): testcmd = self.app_testpmd_path + " " vdev = "net_vhost0,iface=vhost-net,queues=1" para = " -- -i --no-numa --socket-num=%d" % self.ports_socket - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_vhost_user, prefix="vhost", ports=[self.pci_info], @@ -153,7 +153,7 @@ class TestPvpVirtioUser4kPages(TestCase): if not packed else "net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net,packed_vq=1,queues=1" ) - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_virtio_user, prefix="virtio-user", ports=[self.pci_info], @@ -168,11 +168,11 @@ class TestPvpVirtioUser4kPages(TestCase): """ Prepare tmpfs with 4K-pages """ - self.dut.send_expect("mkdir -p /mnt/tmpfs_nohuge", "# ") - self.dut.send_expect("mount tmpfs /mnt/tmpfs_nohuge -t tmpfs -o size=4G", "# ") + self.sut_node.send_expect("mkdir -p /mnt/tmpfs_nohuge", "# ") + self.sut_node.send_expect("mount tmpfs /mnt/tmpfs_nohuge -t tmpfs -o size=4G", "# ") def 
restore_env_of_tmpfs_for_4k(self): - self.dut.send_expect("umount /mnt/tmpfs_nohuge", "# ") + self.sut_node.send_expect("umount /mnt/tmpfs_nohuge", "# ") def close_all_apps(self): """ @@ -180,8 +180,8 @@ class TestPvpVirtioUser4kPages(TestCase): """ self.virtio_user.send_expect("quit", "# ", 60) self.vhost_user.send_expect("quit", "# ", 60) - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user) def test_perf_pvp_virtio_user_split_ring_with_4K_pages(self): """ @@ -209,7 +209,7 @@ class TestPvpVirtioUser4kPages(TestCase): """ Run after each test case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "# ") self.restore_env_of_tmpfs_for_4k() def tear_down_all(self): diff --git a/tests/TestSuite_pvp_virtio_user_multi_queues_port_restart.py b/tests/TestSuite_pvp_virtio_user_multi_queues_port_restart.py index 92433641..681796f6 100644 --- a/tests/TestSuite_pvp_virtio_user_multi_queues_port_restart.py +++ b/tests/TestSuite_pvp_virtio_user_multi_queues_port_restart.py @@ -13,10 +13,10 @@ non-mergeable path, also cover port restart test with each path. import re import time -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestPVPVirtioUserMultiQueuesPortRestart(TestCase): @@ -25,27 +25,27 @@ class TestPVPVirtioUserMultiQueuesPortRestart(TestCase): Run at the start of each test suite. 
""" self.frame_sizes = [64] - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") # get core mask - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list("all", socket=self.ports_socket) - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list("all", socket=self.ports_socket) + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.pci_info = self.dut.ports_info[0]["pci"] - self.vhost_pmd_session = self.dut.new_session(suite="vhost-user") - self.tx_port = self.tester.get_local_port(self.dut_ports[0]) + self.pktgen_helper = TrafficGeneratorStream() + self.pci_info = self.sut_node.ports_info[0]["pci"] + self.vhost_pmd_session = self.sut_node.new_session(suite="vhost-user") + self.tx_port = self.tg_node.get_local_port(self.sut_ports[0]) self.queue_number = 2 - self.dut.kill_all() + self.sut_node.kill_all() self.number_of_ports = 1 - self.vhost_pmd = PmdOutput(self.dut, self.vhost_pmd_session) - self.virtio_user_pmd = PmdOutput(self.dut) - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.vhost_pmd = PmdOutput(self.sut_node, self.vhost_pmd_session) + self.virtio_user_pmd = PmdOutput(self.sut_node) + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = 
self.app_testpmd_path.split("/")[-1] def set_up(self): @@ -53,7 +53,7 @@ class TestPVPVirtioUserMultiQueuesPortRestart(TestCase): Run before each test case. """ # Clean the execution ENV - self.dut.send_expect("rm -rf ./vhost.out", "#") + self.sut_node.send_expect("rm -rf ./vhost.out", "#") # Prepare the result table self.table_header = [ "FrameSize(B)", @@ -68,8 +68,8 @@ class TestPVPVirtioUserMultiQueuesPortRestart(TestCase): """ start testpmd on vhost """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") vdev = "'net_vhost0,iface=vhost-net,queues=2,client=0'" param = "--nb-cores=2 --rxq={} --txq={} --rss-ip".format( self.queue_number, self.queue_number @@ -85,7 +85,7 @@ class TestPVPVirtioUserMultiQueuesPortRestart(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -161,21 +161,21 @@ class TestPVPVirtioUserMultiQueuesPortRestart(TestCase): """ start to send packet and get the throughput """ - pkt = Packet(pkt_type="IP_RAW", pkt_len=frame_size) - pkt.config_layer("ether", {"dst": "%s" % self.dst_mac}) - pkt.save_pcapfile(self.tester, "%s/pvp_multipath.pcap" % (self.out_path)) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IP_RAW", pkt_len=frame_size) + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % self.dst_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/pvp_multipath.pcap" % (self.out_path)) tgenInput = [] - port = self.tester.get_local_port(self.dut_ports[0]) + port = self.tg_node.get_local_port(self.sut_ports[0]) tgenInput.append((port, port, "%s/pvp_multipath.pcap" % self.out_path)) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() fields_config = 
{"ip": {"dst": {"action": "random"}}} streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, fields_config, self.tester.pktgen + tgenInput, 100, fields_config, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 5} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) Mpps = pps / 1000000.0 @@ -359,11 +359,11 @@ class TestPVPVirtioUserMultiQueuesPortRestart(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) def tear_down_all(self): """ Run after each test suite. """ - self.dut.close_session(self.vhost_pmd_session) + self.sut_node.close_session(self.vhost_pmd_session) diff --git a/tests/TestSuite_qinq_filter.py b/tests/TestSuite_qinq_filter.py index bf4d9a4d..b2fe129f 100644 --- a/tests/TestSuite_qinq_filter.py +++ b/tests/TestSuite_qinq_filter.py @@ -22,8 +22,8 @@ class TestQinqFilter(TestCase): Run at the start of each test suite. 
""" - global dutRxPortId - global dutTxPortId + global sutRxPortId + global sutTxPortId self.verify( self.nic @@ -40,25 +40,25 @@ class TestQinqFilter(TestCase): print( "this case only supports Intel® Ethernet 700 Series with 6.0.0+ firmware and dpdk17.05+" ) - ports = self.dut.get_ports() + ports = self.sut_node.get_ports() # Verify that enough ports are available self.verify(len(ports) >= 1, "Insufficient ports") - valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] - dutRxPortId = valports[0] - dutTxPortId = valports[0] + valports = [_ for _ in ports if self.tg_node.get_local_port(_) != -1] + sutRxPortId = valports[0] + sutTxPortId = valports[0] - port = self.tester.get_local_port(dutRxPortId) - self.txItf = self.tester.get_interface(port) - self.smac = self.tester.get_mac(port) - # the packet dest mac must is dut tx port id when the port promisc is off - self.dmac = self.dut.get_mac_address(dutRxPortId) + port = self.tg_node.get_local_port(sutRxPortId) + self.txItf = self.tg_node.get_interface(port) + self.smac = self.tg_node.get_mac(port) + # the packet dest mac must is SUT tx port id when the port promisc is off + self.dmac = self.sut_node.get_mac_address(sutRxPortId) self.portMask = utils.create_mask(valports[:1]) - cores = self.dut.get_core_list("1S/2C/1T") + cores = self.sut_node.get_core_list("1S/2C/1T") self.coreMask = utils.create_mask(cores) - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] def vlan_send_packet(self, vlans): """ @@ -74,8 +74,8 @@ class TestQinqFilter(TestCase): % self.txItf ) - self.tester.scapy_append(vlanString) - self.tester.scapy_execute() + self.tg_node.scapy_append(vlanString) + self.tg_node.scapy_execute() def creat_pcap(self, vlans_list): """ @@ -90,8 +90,8 @@ class TestQinqFilter(TestCase): / IP(src="192.168.0.1", dst="192.168.0.2") / Raw("x" * 20) ) - self.tester.send_expect("rm -rf tmp_qinq.pcap", "# ") - self.tester.send_expect("rm -rf dst_qinq.pcap", "# ") + 
self.tg_node.send_expect("rm -rf tmp_qinq.pcap", "# ") + self.tg_node.send_expect("rm -rf dst_qinq.pcap", "# ") wrpcap("tmp_qinq.pcap", packets) fr = open("tmp_qinq.pcap", "rb") @@ -116,15 +116,15 @@ class TestQinqFilter(TestCase): def config_vfs(self, port_id, vfs): """ - if vfs is 0, call destroy_sriov_vfs_by_port in dut for destory vf. + if vfs is 0, call destroy_sriov_vfs_by_port in SUT for destory vf. if vfs > 0, call generate_sriov_vfs_by_port generate vf and bind igb_uio to vf """ if vfs: - self.dut.generate_sriov_vfs_by_port(port_id, vfs, "igb_uio") - for port in self.dut.ports_info[port_id]["vfs_port"]: + self.sut_node.generate_sriov_vfs_by_port(port_id, vfs, "igb_uio") + for port in self.sut_node.ports_info[port_id]["vfs_port"]: port.bind_driver("igb_uio") else: - self.dut.destroy_sriov_vfs_by_port(port_id) + self.sut_node.destroy_sriov_vfs_by_port(port_id) def set_up(self): """ @@ -139,33 +139,33 @@ class TestQinqFilter(TestCase): self.logger.info( "\r\n-------------------------this case only support novector mode to start testpmd!-------------------------\r\n" ) - pmd_out = PmdOutput(self.dut) + pmd_out = PmdOutput(self.sut_node) pmd_out.start_testpmd( "1S/2C/1T", eal_param="--force-max-simd-bitwidth=64", param="--portmask=%s --port-topology=loop --rxq=4 --txq=4 --disable-rss" % self.portMask, ) - self.dut.send_expect("vlan set extend on %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("vlan set strip on %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("vlan set qinq_strip on %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("vlan set extend on %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set strip on %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set qinq_strip on %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("set fwd 
rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(5) self.vlan_send_packet([(0x8100, 2), (0x8100, 3)]) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.verify("QinQ VLAN" in out, "dual vlan not received:" + str(out)) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") def test_qinq_filter_PF_queues(self): """ qinq filter packet received by assign PF queues """ - eal_para = self.dut.create_eal_parameters(cores="1S/2C/1T") - self.dut.send_expect( + eal_para = self.sut_node.create_eal_parameters(cores="1S/2C/1T") + self.sut_node.send_expect( r"%s %s -- -i \ --portmask=%s --port-topology=loop \ --rxq=4 --txq=4 --disable-rss" @@ -173,79 +173,79 @@ class TestQinqFilter(TestCase): "testpmd> ", 30, ) - self.dut.send_expect("vlan set extend on %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("vlan set extend on %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(5) # out vlan 1, inner vlan 4093 packet will received by PF queue 1 - self.dut.send_expect( + self.sut_node.send_expect( r"flow create 0 ingress pattern eth / vlan tci is 1 / vlan tci is 4093 / end actions pf / queue index 1 / end", "testpmd> ", ) # out vlan 2, inner vlan 4094 packet will received by PF queue 1 - self.dut.send_expect( + self.sut_node.send_expect( r"flow create 0 ingress pattern eth / vlan tci is 2 / vlan tci is 4094 / end actions pf / queue index 2 / end", "testpmd> ", ) self.vlan_send_packet([(0x8100, 1), (0x8100, 4093)]) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.verify( 
"queue 1: received 1 packets" in out, "out vlan 1, inner vlan 4093 received not by queue 1 : %s" % out, ) self.vlan_send_packet([(0x8100, 2), (0x8100, 4094)]) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.verify( "queue 2: received 1 packets" in out, "out vlan 1, inner vlan 4093 received not by queue 2 : %s" % out, ) - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("quit", "#") def test_qinq_packet_filter_VF_queues(self): """ qinq filter packet received by assign VF queues """ - self.config_vfs(dutRxPortId, 2) - vf_list = self.dut.ports_info[dutRxPortId]["sriov_vfs_pci"] + self.config_vfs(sutRxPortId, 2) + vf_list = self.sut_node.ports_info[sutRxPortId]["sriov_vfs_pci"] self.verify(len(vf_list) == 2, "config 2 vf failed: %s" % str(vf_list)) - vf0_session = self.dut.new_session("qinq_filter") - vf1_session = self.dut.new_session("qinq_filter") + vf0_session = self.sut_node.new_session("qinq_filter") + vf1_session = self.sut_node.new_session("qinq_filter") - eal_para = self.dut.create_eal_parameters( + eal_para = self.sut_node.create_eal_parameters( cores="1S/2C/1T", prefix="pf", - ports=[self.dut.ports_info[dutRxPortId]["pci"]], + ports=[self.sut_node.ports_info[sutRxPortId]["pci"]], ) - self.dut.send_expect( + self.sut_node.send_expect( r"%s %s -- -i --port-topology=loop \ --rxq=4 --txq=4 --disable-rss" % (self.path, eal_para), "testpmd> ", 30, ) - self.dut.send_expect("vlan set extend on %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("vlan set extend on %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ", 120) # out vlan 1, inner vlan 4093 packet will received by vf0 queue 2 - self.dut.send_expect( + 
self.sut_node.send_expect( r"flow create 0 ingress pattern eth / vlan tci is 1 / vlan tci is 4093 / end actions vf id 0 / queue index 2 / end", "testpmd> ", ) # out vlan 2, inner vlan 4094 packet will received by vf1 queue 3 - self.dut.send_expect( + self.sut_node.send_expect( r"flow create 0 ingress pattern eth / vlan tci is 2 / vlan tci is 4094 / end actions vf id 1 / queue index 3 / end", "testpmd> ", ) # out vlan 3, inner vlan 4094 packet will received by pf queue 1 - self.dut.send_expect( + self.sut_node.send_expect( r"flow create 0 ingress pattern eth / vlan tci is 3 / vlan tci is 4094 / end actions pf / queue index 1 / end", "testpmd> ", ) @@ -279,13 +279,13 @@ class TestQinqFilter(TestCase): ]: self.vlan_send_packet(vlan_config) - dut_out = self.dut.get_session_output() + sut_out = self.sut_node.get_session_output() vf0_out = vf0_session.get_session_before(30) vf1_out = vf1_session.get_session_before(30) error_message = "" - if "queue 1: received 1 packets" not in dut_out: - error_message += "dut testpmd received packt queue error: %s" % dut_out + if "queue 1: received 1 packets" not in sut_out: + error_message += "SUT testpmd received packt queue error: %s" % sut_out elif "queue 2: received 1 packets" not in vf0_out: error_message += " vf0 testpmd received packt queue error: %s" % vf0_out elif "queue 3: received 1 packets" not in vf1_out: @@ -293,11 +293,11 @@ class TestQinqFilter(TestCase): for session_name in [vf0_session, vf1_session]: session_name.send_expect("quit", "#") - self.dut.close_session(session_name) - self.dut.send_expect("quit", "#") + self.sut_node.close_session(session_name) + self.sut_node.send_expect("quit", "#") - self.config_vfs(dutRxPortId, 0) - vf_list = self.dut.ports_info[dutRxPortId]["sriov_vfs_pci"] + self.config_vfs(sutRxPortId, 0) + vf_list = self.sut_node.ports_info[sutRxPortId]["sriov_vfs_pci"] self.verify(len(vf_list) == 0, "destroy vf failed: %s" % str(vf_list)) self.verify(not error_message, error_message) @@ -306,47 
+306,47 @@ class TestQinqFilter(TestCase): """ qinq filter packet with different tpid received by assign VF queues """ - self.config_vfs(dutRxPortId, 2) - vf_list = self.dut.ports_info[dutRxPortId]["sriov_vfs_pci"] + self.config_vfs(sutRxPortId, 2) + vf_list = self.sut_node.ports_info[sutRxPortId]["sriov_vfs_pci"] self.verify(len(vf_list) == 2, "config 2 vf failed: %s" % str(vf_list)) - vf0_session = self.dut.new_session("qinq_filter") - vf1_session = self.dut.new_session("qinq_filter") + vf0_session = self.sut_node.new_session("qinq_filter") + vf1_session = self.sut_node.new_session("qinq_filter") - eal_para = self.dut.create_eal_parameters( + eal_para = self.sut_node.create_eal_parameters( cores="1S/2C/1T", prefix="pf", - ports=[self.dut.ports_info[dutRxPortId]["pci"]], + ports=[self.sut_node.ports_info[sutRxPortId]["pci"]], ) - self.dut.send_expect( + self.sut_node.send_expect( r"%s %s -- -i --port-topology=loop \ --rxq=4 --txq=4 --disable-rss" % (self.path, eal_para), "testpmd> ", 30, ) - self.dut.send_expect("vlan set extend on %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("vlan set extend on %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(5) # out vlan 1, inner vlan 4093 packet will received by vf0 queue 2 - self.dut.send_expect( + self.sut_node.send_expect( r"flow create 0 ingress pattern eth / vlan tci is 1 / vlan tci is 4093 / end actions vf id 0 / queue index 2 / end", "testpmd> ", ) # out vlan 2, inner vlan 4094 packet will received by vf1 queue 3 - self.dut.send_expect( + self.sut_node.send_expect( r"flow create 0 ingress pattern eth / vlan tci is 2 / vlan tci is 4094 / end actions vf id 1 / queue index 3 / end", 
"testpmd> ", ) # out vlan 3, inner vlan 4094 packet will received by pf queue 1 - self.dut.send_expect( + self.sut_node.send_expect( r"flow create 0 ingress pattern eth / vlan tci is 3 / vlan tci is 4094 / end actions pf / queue index 1 / end", "testpmd> ", ) - self.dut.send_expect("vlan set outer tpid 0x88a8 0", "testpmd") + self.sut_node.send_expect("vlan set outer tpid 0x88a8 0", "testpmd") vf0_session.send_expect( r"%s -c %s -n 4 \ @@ -373,18 +373,18 @@ class TestQinqFilter(TestCase): self.creat_pcap([(1, 4093), (2, 4094), (3, 4094)]) - self.tester.scapy_append('pcap = rdpcap("/root/dst_qinq.pcap")') - self.tester.scapy_append('sendp(pcap, iface="%s")' % self.txItf) - self.tester.scapy_execute() + self.tg_node.scapy_append('pcap = rdpcap("/root/dst_qinq.pcap")') + self.tg_node.scapy_append('sendp(pcap, iface="%s")' % self.txItf) + self.tg_node.scapy_execute() time.sleep(5) - dut_out = self.dut.get_session_output() + sut_out = self.sut_node.get_session_output() vf0_out = vf0_session.get_session_before(30) vf1_out = vf1_session.get_session_before(30) error_message = "" - if "queue 1: received 1 packets" not in dut_out: - error_message += "dut testpmd received packt queue error: %s" % dut_out + if "queue 1: received 1 packets" not in sut_out: + error_message += "SUT testpmd received packt queue error: %s" % sut_out elif "queue 2: received 1 packets" not in vf0_out: error_message += " vf0 testpmd received packt queue error: %s" % vf0_out elif "queue 3: received 1 packets" not in vf1_out: @@ -392,11 +392,11 @@ class TestQinqFilter(TestCase): for session_name in [vf0_session, vf1_session]: session_name.send_expect("quit", "#") - self.dut.close_session(session_name) - self.dut.send_expect("quit", "#") + self.sut_node.close_session(session_name) + self.sut_node.send_expect("quit", "#") - self.config_vfs(dutRxPortId, 0) - vf_list = self.dut.ports_info[dutRxPortId]["sriov_vfs_pci"] + self.config_vfs(sutRxPortId, 0) + vf_list = 
self.sut_node.ports_info[sutRxPortId]["sriov_vfs_pci"] self.verify(len(vf_list) == 0, "destroy vf failed: %s" % str(vf_list)) self.verify(not error_message, error_message) @@ -405,7 +405,7 @@ class TestQinqFilter(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_qos_api.py b/tests/TestSuite_qos_api.py index 56065c3b..e5a435a3 100644 --- a/tests/TestSuite_qos_api.py +++ b/tests/TestSuite_qos_api.py @@ -13,9 +13,9 @@ import string import time import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestQosApi(TestCase): @@ -25,28 +25,28 @@ class TestQosApi(TestCase): """ # Based on h/w type, choose how many ports to use - ports = self.dut.get_ports() - self.dut_ports = self.dut.get_ports(self.nic) + ports = self.sut_node.get_ports() + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available self.verify(len(ports) >= 2, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(ports[0]) + self.ports_socket = self.sut_node.get_numa_id(ports[0]) # each flow to 200Mbps self.bps = 200000000 self.bps_rate = [0, 0.1] self.eal_param = " --main-lcore=1" # Verify that enough threads are available - cores = self.dut.get_core_list("1S/1C/1T") + cores = self.sut_node.get_core_list("1S/1C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") global P0, P1 P0 = ports[0] P1 = ports[1] - self.txItf = self.tester.get_interface(self.tester.get_local_port(P0)) - self.rxItf = self.tester.get_interface(self.tester.get_local_port(P1)) - self.dmac = self.dut.get_mac_address(P0) - self.host_testpmd = PmdOutput(self.dut) + self.txItf = self.tg_node.get_interface(self.tg_node.get_local_port(P0)) + self.rxItf = 
self.tg_node.get_interface(self.tg_node.get_local_port(P1)) + self.dmac = self.sut_node.get_mac_address(P0) + self.host_testpmd = PmdOutput(self.sut_node) # get dts output path if self.logger.log_path.startswith(os.sep): @@ -55,7 +55,7 @@ class TestQosApi(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -63,33 +63,33 @@ class TestQosApi(TestCase): """ def add_root_non_leaf_node(self): - self.dut.send_expect( + self.sut_node.send_expect( "add port tm nonleaf node 1 1000000 -1 0 1 0 -1 1 0 0", "testpmd> " ) def add_private_shaper(self, n): for i in range(n): - self.dut.send_expect( + self.sut_node.send_expect( "add port tm node shaper profile 1 %s 0 0 25000000 0 0" % str(i + 1), "testpmd> ", ) def add_private_shaper_ixgbe(self, n): for i in range(n): - self.dut.send_expect( + self.sut_node.send_expect( "add port tm node shaper profile 1 %s 0 0 25000000 0 0" % i, "testpmd> " ) def add_tc_node(self, n): for i in range(n): - self.dut.send_expect( + self.sut_node.send_expect( "add port tm nonleaf node 1 %s 1000000 0 1 1 1 1 0 0" % (900000 + i), "testpmd> ", ) def add_tc_node_ixgbe(self, n): for i in range(n): - self.dut.send_expect( + self.sut_node.send_expect( "add port tm nonleaf node 1 %s 1000000 0 1 1 -1 1 0 0" % (900000 + i), "testpmd> ", ) @@ -98,17 +98,17 @@ class TestQosApi(TestCase): """ set DCB """ - self.dut.send_expect("port stop all", "testpmd> ") - for i in range(len(self.dut.ports_info)): - self.dut.send_expect( + self.sut_node.send_expect("port stop all", "testpmd> ") + for i in range(len(self.sut_node.ports_info)): + self.sut_node.send_expect( "port config %s dcb vt off %s pfc off" % (i, n), "testpmd> " ) - self.dut.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("port 
start all", "testpmd> ") def scapy_send_packet_verify(self, n): - self.host_testpmd.wait_link_status_up(self.dut_ports[0]) - self.tester.scapy_foreground() - dmac = self.dut.get_mac_address(P0) + self.host_testpmd.wait_link_status_up(self.sut_ports[0]) + self.tg_node.scapy_foreground() + dmac = self.sut_node.get_mac_address(P0) queues_4tc = [0, 32, 64, 96] queues_8tc = [0, 16, 32, 48, 64, 80, 96, 112] print(dmac) @@ -117,10 +117,10 @@ class TestQosApi(TestCase): "Ether(dst='%s', src='00:02:00:00:00:01')/Dot1Q(prio=%s)/IP()/Raw('x'*20)" % (dmac, i) ) - self.tester.scapy_append('sendp([%s], iface="%s")' % (pkt, self.txItf)) - self.tester.scapy_execute() + self.tg_node.scapy_append('sendp([%s], iface="%s")' % (pkt, self.txItf)) + self.tg_node.scapy_execute() time.sleep(2) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() if self.kdriver == "i40e": self.verify( "queue %s" % i in out and dmac.upper() in out, @@ -140,10 +140,10 @@ class TestQosApi(TestCase): def queue_map_test(self, n): self.set_dcb(n) - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.scapy_send_packet_verify(n) def shaping_tc_test_i40e(self, n): @@ -152,8 +152,8 @@ class TestQosApi(TestCase): self.add_private_shaper(n) self.add_tc_node(n) self.add_queue_leaf_node(n) - self.dut.send_expect("port tm hierarchy commit 1 no", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port tm hierarchy commit 1 no", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.perf_test(n) def test_dcb_4tc_queue_map_i40e(self): @@ -182,16 +182,16 @@ class 
TestQosApi(TestCase): " --nb-cores=4 --txq=4 --rxq=4 --rss-ip ", eal_param=self.eal_param, ) - self.dut.send_expect("port stop 1", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop 1", "testpmd> ") + self.sut_node.send_expect( "add port tm node shaper profile 1 0 0 0 25000000 0 0", "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "add port tm nonleaf node 1 1000000 -1 0 1 0 0 1 0 0", "testpmd> " ) - self.dut.send_expect("port tm hierarchy commit 1 no", "testpmd> ") - self.dut.send_expect("port start 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port tm hierarchy commit 1 no", "testpmd> ") + self.sut_node.send_expect("port start 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.perf_test(4) def test_perf_shaping_1port_4tc_i40e(self): @@ -254,13 +254,13 @@ class TestQosApi(TestCase): self.add_tc_node_ixgbe(n) self.add_private_shaper_ixgbe(n) self.add_queue_leaf_node_ixgbe(n) - self.dut.send_expect("port tm hierarchy commit 1 no", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port tm hierarchy commit 1 no", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.perf_test(n) def perf_test(self, n): - self.host_testpmd.wait_link_status_up(self.dut_ports[0]) - dmac = self.dut.get_mac_address(self.dut_ports[0]) + self.host_testpmd.wait_link_status_up(self.sut_ports[0]) + dmac = self.sut_node.get_mac_address(self.sut_ports[0]) pkts = [] for i in range(n): pkt = ( @@ -271,25 +271,25 @@ class TestQosApi(TestCase): for i in range(n): flow = pkts[i] pcap = os.sep.join([self.output_path, "test.pcap"]) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap, flow)) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap, flow)) + self.tg_node.scapy_execute() tgenInput = [] pcap = os.sep.join([self.output_path, "test.pcap"]) tgenInput.append( ( - 
self.tester.get_local_port(self.dut_ports[0]), - self.tester.get_local_port(self.dut_ports[1]), + self.tg_node.get_local_port(self.sut_ports[0]), + self.tg_node.get_local_port(self.sut_ports[1]), pcap, ) ) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) traffic_opt = {"delay": 10} - bps, pps = self.tester.pktgen.measure_throughput( + bps, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) bps_rate = abs(float(self.bps) - bps) / self.bps @@ -301,7 +301,7 @@ class TestQosApi(TestCase): def add_queue_leaf_node(self, n): for i in range(n): - self.dut.send_expect( + self.sut_node.send_expect( "add port tm leaf node 1 %s %s 0 1 2 -1 0 0xffffffff 0 0" % (i, 900000 + i), "testpmd> ", @@ -309,7 +309,7 @@ class TestQosApi(TestCase): def add_queue_leaf_node_ixgbe(self, n): for i in range(n): - self.dut.send_expect( + self.sut_node.send_expect( "add port tm leaf node 1 %s %s 0 1 2 0 0 0xffffffff 0 0" % (i, 900000 + i), "testpmd> ", @@ -319,7 +319,7 @@ class TestQosApi(TestCase): """ Run after each test case. """ - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def tear_down_all(self): """ diff --git a/tests/TestSuite_qos_meter.py b/tests/TestSuite_qos_meter.py index b1d13a32..79096756 100644 --- a/tests/TestSuite_qos_meter.py +++ b/tests/TestSuite_qos_meter.py @@ -5,14 +5,13 @@ """ DPDK Test suite. Test QOS API in DPDK. -The DUT must have two 10G Ethernet ports connected to two ports of IXIA. +The SUT must have two 10G Ethernet ports connected to two ports of IXIA. 
""" import os -from framework.packet import Packet -from framework.pktgen import TRANSMIT_CONT from framework.pmd_output import PmdOutput -from framework.settings import HEADER_SIZE +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.settings import HEADER_SIZE, TRANSMIT_CONT from framework.test_case import TestCase @@ -22,11 +21,11 @@ class TestQosMeter(TestCase): ip_fragmentation Prerequisites """ # Based on h/w type, choose how many ports to use - ports = self.dut.get_ports() - self.dut_ports = self.dut.get_ports(self.nic) - self.tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.rx_port = self.tester.get_local_port(self.dut_ports[1]) - self.pmdout = PmdOutput(self.dut) + ports = self.sut_node.get_ports() + self.sut_ports = self.sut_node.get_ports(self.nic) + self.tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.rx_port = self.tg_node.get_local_port(self.sut_ports[1]) + self.pmdout = PmdOutput(self.sut_node) # Verify that enough ports are available self.verify(len(ports) >= 2, "Insufficient ports for testing") @@ -46,41 +45,41 @@ class TestQosMeter(TestCase): Build app and send pkt return bps and pps """ - self.dut.send_expect("rm -rf ./examples/qos_meter/build", "#") - out = self.dut.build_dpdk_apps("./examples/qos_meter") + self.sut_node.send_expect("rm -rf ./examples/qos_meter/build", "#") + out = self.sut_node.build_dpdk_apps("./examples/qos_meter") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores="1S/1C/1T", fixed_prefix=True, prefix="qos_meter" ) - app_name = self.dut.apps_name["qos_meter"] + app_name = self.sut_node.apps_name["qos_meter"] cmd = app_name + eal_params + "-- -p 0x3" - self.dut.send_expect(cmd, "TX = 1") + self.sut_node.send_expect(cmd, "TX = 1") payload_size = 64 - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - dts_mac = 
self.dut.get_mac_address(self.dut_ports[self.rx_port]) - pkt = Packet(pkt_type="IP_RAW") - pkt.save_pcapfile(self.tester, "%s/tester.pcap" % self.tester.tmp_file) + dts_mac = self.sut_node.get_mac_address(self.sut_ports[self.rx_port]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IP_RAW") + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/tg.pcap" % self.tg_node.tmp_file) stream_option = { - "pcap": "%s/tester.pcap" % self.tester.tmp_file, + "pcap": "%s/tg.pcap" % self.tg_node.tmp_file, "stream_config": { "rate": 100, "transmit_mode": TRANSMIT_CONT, }, } - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() stream_ids = [] - stream_id = self.tester.pktgen.add_stream( - self.tx_port, self.rx_port, "%s/tester.pcap" % self.tester.tmp_file + stream_id = self.tg_node.perf_tg.add_stream( + self.tx_port, self.rx_port, "%s/tg.pcap" % self.tg_node.tmp_file ) - self.tester.pktgen.config_stream(stream_id, stream_option) + self.tg_node.perf_tg.config_stream(stream_id, stream_option) stream_ids.append(stream_id) - stream_id = self.tester.pktgen.add_stream( - self.rx_port, self.tx_port, "%s/tester.pcap" % self.tester.tmp_file + stream_id = self.tg_node.perf_tg.add_stream( + self.rx_port, self.tx_port, "%s/tg.pcap" % self.tg_node.tmp_file ) - self.tester.pktgen.config_stream(stream_id, stream_option) + self.tg_node.perf_tg.config_stream(stream_id, stream_option) stream_ids.append(stream_id) traffic_opt = {"method": "throughput", "rate": 100, "duration": 20} - bps, pps = self.tester.pktgen.measure(stream_ids, traffic_opt) + bps, pps = self.tg_node.perf_tg.measure(stream_ids, traffic_opt) return bps, pps def verify_throughput(self, throughput, pps): @@ -94,11 +93,11 @@ class TestQosMeter(TestCase): """ srTCM blind RED """ - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/#define APP_PKT_COLOR_POS/s/[0-9]/5/g' ./examples/qos_meter/main.c", "#", ) - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/^#define APP_MODE 
/s/APP_MODE_*/APP_MODE_SRTCM_COLOR_BLIND/2' ./examples/qos_meter/main.c", "#", ) @@ -109,11 +108,11 @@ class TestQosMeter(TestCase): """ srTCM blind GREEN """ - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/#define APP_PKT_COLOR_POS/s/[0-9]/3/g' ./examples/qos_meter/main.c", "#", ) - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/^#define APP_MODE /s/APP_MODE_*/APP_MODE_SRTCM_COLOR_BLIND/2' ./examples/qos_meter/main.c", "#", ) @@ -124,11 +123,11 @@ class TestQosMeter(TestCase): """ srTCM aware RED """ - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/#define APP_PKT_COLOR_POS/s/[0-9]/5/g' ./examples/qos_meter/main.c", "#", ) - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/^#define APP_MODE /s/APP_MODE_*/APP_MODE_SRTCM_COLOR_AWARE/2' ./examples/qos_meter/main.c", "#", ) @@ -139,11 +138,11 @@ class TestQosMeter(TestCase): """ trTCM blind """ - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/#define APP_PKT_COLOR_POS/s/[0-9]/5/g' ./examples/qos_meter/main.c", "#", ) - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/^#define APP_MODE /s/APP_MODE_*/APP_MODE_TRTCM_COLOR_BLIND/2' ./examples/qos_meter/main.c", "#", ) @@ -154,11 +153,11 @@ class TestQosMeter(TestCase): """ trTCM aware """ - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/#define APP_PKT_COLOR_POS/s/[0-9]/5/g' ./examples/qos_meter/main.c", "#", ) - self.dut.send_expect( + self.sut_node.send_expect( r"sed -i -e '/^#define APP_MODE /s/APP_MODE_*/APP_MODE_TRTCM_COLOR_AWARE/2' ./examples/qos_meter/main.c", "#", ) @@ -169,10 +168,10 @@ class TestQosMeter(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_queue_region.py b/tests/TestSuite_queue_region.py index f1c27962..385a0cb9 100644 --- a/tests/TestSuite_queue_region.py +++ b/tests/TestSuite_queue_region.py @@ -13,11 +13,11 @@ import re import time import framework.utils as utils -from framework.dut import Dut -from framework.packet import Packet from framework.pmd_output import PmdOutput -from framework.project_dpdk import DPDKdut +from framework.project_dpdk import DPDKSut +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import DRIVERS, HEADER_SIZE +from framework.sut_node import SutNode from framework.test_case import TestCase @@ -42,23 +42,23 @@ class TestQueue_region(TestCase): ) # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.tester_intf = self.tester.get_interface(localPort) - self.tester_mac = self.tester.get_mac(localPort) - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf_mac = self.dut.get_mac_address(0) - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pmdout = PmdOutput(self.dut) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_intf = self.tg_node.get_interface(localPort) + self.tg_mac = self.tg_node.get_mac(localPort) + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf_mac = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pmdout = PmdOutput(self.sut_node) self.cores = "1S/4C/1T" self.pmdout.start_testpmd("%s" % self.cores, "--rxq=16 --txq=16") - self.dut.send_expect("port config all rss all", 
"testpmd> ", 120) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("port config all rss all", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) def set_up(self): @@ -71,15 +71,15 @@ class TestQueue_region(TestCase): """ get the queue which packet enter. """ - outstring = self.dut.send_expect("stop", "testpmd> ") + outstring = self.sut_node.send_expect("stop", "testpmd> ") result_scanner = ( - r"Forward Stats for RX Port= %s/Queue=\s?([0-9]+)" % self.dut_ports[0] + r"Forward Stats for RX Port= %s/Queue=\s?([0-9]+)" % self.sut_ports[0] ) scanner = re.compile(result_scanner, re.DOTALL) m = scanner.search(outstring) queue_id = m.group(1) print("queue is %s" % queue_id) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") return queue_id def send_and_check( @@ -113,71 +113,71 @@ class TestQueue_region(TestCase): send different PCTYPE packets. 
""" if pkt_type == "udp": - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) elif pkt_type == "tcp": - pkt = Packet(pkt_type="TCP") - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("tcp", {"flags": flags}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="TCP") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("tcp", {"flags": flags}) if flags == "S": - pkt.pktgen.pkt.__delitem__("Raw") + scapy_pkt_builder.scapy_pkt_util.pkt.__delitem__("Raw") elif pkt_type == "sctp": - pkt = Packet(pkt_type="SCTP") - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("sctp", {"tag": tag}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="SCTP") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("sctp", {"tag": tag}) elif pkt_type == "ipv4": - pkt = Packet(pkt_type="IP_RAW") - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("ipv4", {"frag": frag}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IP_RAW") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("ipv4", {"frag": frag}) elif pkt_type == "ipv6_udp": - pkt = Packet(pkt_type="IPv6_UDP") - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IPv6_UDP") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) elif pkt_type == "ipv6_tcp": - pkt = Packet(pkt_type="IPv6_TCP") - pkt.config_layer("tcp", {"flags": flags}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IPv6_TCP") + scapy_pkt_builder.config_layer("tcp", {"flags": flags}) if flags == "S": - pkt.pktgen.pkt.__delitem__("Raw") - 
pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) + scapy_pkt_builder.scapy_pkt_util.pkt.__delitem__("Raw") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) elif pkt_type == "ipv6_sctp": - pkt = Packet(pkt_type="IPv6_SCTP") - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("sctp", {"tag": tag}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IPv6_SCTP") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("sctp", {"tag": tag}) elif pkt_type == "ipv6": - pkt = Packet() - pkt.assign_layers(["ether", "ipv6", "raw"]) - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "ipv6", "raw"]) + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) elif pkt_type == "L2": - pkt = Packet() - pkt.assign_layers(["ether", "raw"]) - pkt.config_layer( - "ether", {"dst": mac, "src": self.tester_mac, "type": ethertype} + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "raw"]) + scapy_pkt_builder.config_layer( + "ether", {"dst": mac, "src": self.tg_mac, "type": ethertype} ) - pkt.send_pkt(self.tester, tx_port=self.tester_intf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf) def send_packet_up(self, mac, pkt_type="udp", prio=0): """ send different User Priority packets. 
""" if pkt_type == "ipv4": - pkt = Packet() - pkt.assign_layers(["ether", "vlan", "ipv4", "raw"]) - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("vlan", {"vlan": 0, "prio": prio}) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "vlan", "ipv4", "raw"]) + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": 0, "prio": prio}) elif pkt_type == "udp": - pkt = Packet(pkt_type="VLAN_UDP") - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("vlan", {"vlan": 0, "prio": prio}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": 0, "prio": prio}) elif pkt_type == "tcp": - pkt = Packet() - pkt.assign_layers(["ether", "vlan", "ipv4", "tcp", "raw"]) - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("vlan", {"vlan": 0, "prio": prio}) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "vlan", "ipv4", "tcp", "raw"]) + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": 0, "prio": prio}) elif pkt_type == "ipv6_udp": - pkt = Packet() - pkt.assign_layers(["ether", "vlan", "ipv6", "udp", "raw"]) - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("vlan", {"vlan": 0, "prio": prio}) - pkt.send_pkt(self.tester, tx_port=self.tester_intf) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "vlan", "ipv6", "udp", "raw"]) + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": 0, "prio": prio}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf) def get_and_compare_rules(self, out, 
QueueRegion_num, FlowType_num, UP_num): """ @@ -205,76 +205,76 @@ class TestQueue_region(TestCase): def test_pctype_map_queue_region(self): # set queue region on a port - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 0 queue_start_index 1 queue_num 1", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 1 queue_start_index 3 queue_num 2", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 2 queue_start_index 6 queue_num 2", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 3 queue_start_index 8 queue_num 2", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 4 queue_start_index 11 queue_num 4", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 5 queue_start_index 15 queue_num 1", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 6 queue_start_index 5 queue_num 1", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 7 queue_start_index 10 queue_num 1", "testpmd> ", ) # Set the mapping of flowtype to region index on a port - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 0 flowtype 31", "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 1 flowtype 32", "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 2 flowtype 33", "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 4 flowtype 35", "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 6 flowtype 36", "testpmd> " ) if self.nic in ["I40E_10G-SFP_X722", "I40E_10G-10G_BASE_T_X722"]: - 
self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 2 flowtype 39", "testpmd> " ) else: - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 2 flowtype 41", "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 3 flowtype 43", "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 4 flowtype 44", "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 5 flowtype 45", "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 7 flowtype 46", "testpmd> " ) - self.dut.send_expect("set port 0 queue-region flush on", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush on", "testpmd> ") # mapping table: # region | queue | flowtype | packet_type @@ -351,10 +351,10 @@ class TestQueue_region(TestCase): # clear all the queue region configuration # check if there is 1 flow rule have been created - out = self.dut.send_expect("show port 0 queue-region", "testpmd> ") + out = self.sut_node.send_expect("show port 0 queue-region", "testpmd> ") self.get_and_compare_rules(out, 8, 10, 0) - self.dut.send_expect("set port 0 queue-region flush off", "testpmd> ") - out = self.dut.send_expect("show port 0 queue-region", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush off", "testpmd> ") + out = self.sut_node.send_expect("show port 0 queue-region", "testpmd> ") self.get_and_compare_rules(out, 0, 0, 0) # confirm packet not to the same queue after flush all the queue regions rules. 
@@ -381,25 +381,25 @@ class TestQueue_region(TestCase): def test_up_map_queue_region(self): # set queue region on a port - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 0 queue_start_index 14 queue_num 2", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 6 queue_start_index 1 queue_num 8", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 2 queue_start_index 10 queue_num 4", "testpmd> ", ) # Set the mapping of user priority to region index on a port - self.dut.send_expect("set port 0 queue-region UP 3 region_id 0", "testpmd> ") - self.dut.send_expect("set port 0 queue-region UP 1 region_id 6", "testpmd> ") - self.dut.send_expect("set port 0 queue-region UP 2 region_id 2", "testpmd> ") - self.dut.send_expect("set port 0 queue-region UP 7 region_id 2", "testpmd> ") - self.dut.send_expect("set port 0 queue-region flush on", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region UP 3 region_id 0", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region UP 1 region_id 6", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region UP 2 region_id 2", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region UP 7 region_id 2", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush on", "testpmd> ") # mapping table: # region | queue | User Priority @@ -436,10 +436,10 @@ class TestQueue_region(TestCase): ) # clear all the queue region configuration - out = self.dut.send_expect("show port 0 queue-region", "testpmd> ") + out = self.sut_node.send_expect("show port 0 queue-region", "testpmd> ") self.get_and_compare_rules(out, 3, 0, 4) - self.dut.send_expect("set port 0 queue-region flush off", "testpmd> ") - out = self.dut.send_expect("show port 0 queue-region", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush off", "testpmd> ") + out = 
self.sut_node.send_expect("show port 0 queue-region", "testpmd> ") self.get_and_compare_rules(out, 0, 0, 0) # confirm packet not to the same queue after flush all the queue region rull. @@ -457,109 +457,109 @@ class TestQueue_region(TestCase): def test_boundary_values(self): # boundary value testing of "Set a queue region on a port" # the following parameters can be set successfully - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "set port 0 queue-region region_id 0 queue_start_index 0 queue_num 16", "testpmd> ", ) self.verify("error" not in outstring, "boundary value check failed") - self.dut.send_expect("set port 0 queue-region flush on", "testpmd> ") - self.dut.send_expect("set port 0 queue-region flush off", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush on", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush off", "testpmd> ") - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "set port 0 queue-region region_id 0 queue_start_index 15 queue_num 1", "testpmd> ", ) self.verify("error" not in outstring, "boundary value check failed") - self.dut.send_expect("set port 0 queue-region flush on", "testpmd> ") - self.dut.send_expect("set port 0 queue-region flush off", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush on", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush off", "testpmd> ") - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "set port 0 queue-region region_id 7 queue_start_index 2 queue_num 8", "testpmd> ", ) self.verify("error" not in outstring, "boundary value check failed") - self.dut.send_expect("set port 0 queue-region flush on", "testpmd> ") - self.dut.send_expect("set port 0 queue-region flush off", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush on", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush off", "testpmd> ") # the following 
parameters failed to be set. # region_id can be set to 0-7 - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 8 queue_start_index 2 queue_num 2", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 1 queue_start_index 16 queue_num 1", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 2 queue_start_index 15 queue_num 2", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 3 queue_start_index 2 queue_num 3", "error", ) - self.dut.send_expect("set port 0 queue-region flush on", "testpmd> ") - out = self.dut.send_expect("show port 0 queue-region", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush on", "testpmd> ") + out = self.sut_node.send_expect("show port 0 queue-region", "testpmd> ") self.get_and_compare_rules(out, 0, 0, 0) - self.dut.send_expect("set port 0 queue-region flush off", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush off", "testpmd> ") # boundary value testing of "Set the mapping of flowtype to region index on a port" - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 0 queue_start_index 2 queue_num 2", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 7 queue_start_index 4 queue_num 4", "testpmd> ", ) # the following parameters can be set successfully - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "set port 0 queue-region region_id 0 flowtype 63", "testpmd> " ) self.verify("error" not in outstring, "boundary value check failed") - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "set port 0 queue-region region_id 7 flowtype 0", "testpmd> " ) self.verify("error" not in outstring, "boundary value check failed") # the following parameters failed to be set. 
- self.dut.send_expect("set port 0 queue-region region_id 0 flowtype 64", "error") - self.dut.send_expect("set port 0 queue-region region_id 2 flowtype 34", "error") - self.dut.send_expect("set port 0 queue-region flush on", "testpmd> ") - out = self.dut.send_expect("show port 0 queue-region", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region region_id 0 flowtype 64", "error") + self.sut_node.send_expect("set port 0 queue-region region_id 2 flowtype 34", "error") + self.sut_node.send_expect("set port 0 queue-region flush on", "testpmd> ") + out = self.sut_node.send_expect("show port 0 queue-region", "testpmd> ") self.get_and_compare_rules(out, 2, 2, 0) - self.dut.send_expect("set port 0 queue-region flush off", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush off", "testpmd> ") # boundary value testing of "Set the mapping of UP to region index on a port" - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 0 queue_start_index 2 queue_num 2", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 7 queue_start_index 4 queue_num 4", "testpmd> ", ) # the following parameters can be set successfully # UP value can be set to 0-7 - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "set port 0 queue-region UP 7 region_id 0", "testpmd> " ) self.verify("error" not in outstring, "boundary value check failed") - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "set port 0 queue-region UP 0 region_id 7", "testpmd> " ) self.verify("error" not in outstring, "boundary value check failed") # the following parameters failed to be set. 
- self.dut.send_expect("set port 0 queue-region UP 8 region_id 0", "error") - self.dut.send_expect("set port 0 queue-region UP 1 region_id 2", "error") - self.dut.send_expect("set port 0 queue-region flush on", "testpmd> ") - out = self.dut.send_expect("show port 0 queue-region", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region UP 8 region_id 0", "error") + self.sut_node.send_expect("set port 0 queue-region UP 1 region_id 2", "error") + self.sut_node.send_expect("set port 0 queue-region flush on", "testpmd> ") + out = self.sut_node.send_expect("show port 0 queue-region", "testpmd> ") self.get_and_compare_rules(out, 2, 0, 2) - self.dut.send_expect("set port 0 queue-region flush off", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush off", "testpmd> ") def tear_down(self): """ @@ -570,6 +570,6 @@ class TestQueue_region(TestCase): """ Run after each test suite. """ - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_queue_start_stop.py b/tests/TestSuite_queue_start_stop.py index 1df59a1b..7b20e186 100644 --- a/tests/TestSuite_queue_start_stop.py +++ b/tests/TestSuite_queue_start_stop.py @@ -13,8 +13,8 @@ import os import re import time -from framework.packet import Packet, strip_pktload from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder, strip_pktload from framework.settings import FOLDERS from framework.test_case import TestCase @@ -35,9 +35,9 @@ class TestQueueStartStop(TestCase): """ Run at the start of each test suite. 
""" - self.ports = self.dut.get_ports(self.nic) + self.ports = self.sut_node.get_ports(self.nic) self.verify(len(self.ports) >= 1, "Insufficient number of ports.") - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ @@ -53,9 +53,9 @@ class TestQueueStartStop(TestCase): # dpdk patch and build try: - self.dut.session.copy_file_to(patch_file, patch_dst) + self.sut_node.session.copy_file_to(patch_file, patch_dst) self.patch_hotfix_dpdk(patch_dst + "macfwd_log.patch", True) - self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) except Exception as e: raise IOError("dpdk setup failure: %s" % e) @@ -66,16 +66,16 @@ class TestQueueStartStop(TestCase): """ Send packages according to parameters. """ - rxitf = self.tester.get_interface(self.tester.get_local_port(rxPort)) - txitf = self.tester.get_interface(self.tester.get_local_port(txPort)) + rxitf = self.tg_node.get_interface(self.tg_node.get_local_port(rxPort)) + txitf = self.tg_node.get_interface(self.tg_node.get_local_port(txPort)) - dmac = self.dut.get_mac_address(txPort) + dmac = self.sut_node.get_mac_address(txPort) - pkt = Packet(pkt_type="UDP", pkt_len=pktSize) - inst = self.tester.tcpdump_sniff_packets(rxitf) - pkt.config_layer("ether", {"dst": dmac}) - pkt.send_pkt(self.tester, tx_port=txitf, count=4) - sniff_pkts = self.tester.load_tcpdump_sniff_packets(inst) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=pktSize) + inst = self.tg_node.tcpdump_sniff_packets(rxitf) + scapy_pkt_builder.config_layer("ether", {"dst": dmac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=txitf, count=4) + sniff_pkts = self.tg_node.load_tcpdump_sniff_packets(inst) if received: res = strip_pktload(sniff_pkts, layer="L4") @@ -93,9 +93,9 @@ class TestQueueStartStop(TestCase): """ try: if on: - self.dut.send_expect("patch -p0 < %s" % patch_dir, "#") + self.sut_node.send_expect("patch -p0 < %s" % 
patch_dir, "#") else: - self.dut.send_expect("patch -p0 -R < %s" % patch_dir, "#") + self.sut_node.send_expect("patch -p0 -R < %s" % patch_dir, "#") except Exception as e: raise ValueError("patch_hotfix_dpdk failure: %s" % e) @@ -104,17 +104,17 @@ class TestQueueStartStop(TestCase): queue start/stop test """ # dpdk start - eal_para = self.dut.create_eal_parameters() + eal_para = self.sut_node.create_eal_parameters() try: - self.dut.send_expect( + self.sut_node.send_expect( "%s %s -- -i --portmask=0x1 --port-topology=loop" % (self.app_testpmd_path, eal_para), "testpmd>", 120, ) time.sleep(5) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.check_forwarding([0, 0], self.nic) except Exception as e: raise IOError("dpdk start and first forward failure: %s" % e) @@ -122,19 +122,19 @@ class TestQueueStartStop(TestCase): # stop rx queue test try: print("test stop rx queue") - self.dut.send_expect("stop", "testpmd>") - self.dut.send_expect("port 0 rxq 0 stop", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") + self.sut_node.send_expect("port 0 rxq 0 stop", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.check_forwarding([0, 0], self.nic, received=False) # start rx queue test print("test start rx queue stop tx queue") - self.dut.send_expect("stop", "testpmd>") - self.dut.send_expect("port 0 rxq 0 start", "testpmd>") - self.dut.send_expect("port 0 txq 0 stop", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") + self.sut_node.send_expect("port 0 rxq 0 start", "testpmd>") + self.sut_node.send_expect("port 0 txq 0 stop", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.check_forwarding([0, 0], self.nic, received=False) - out = self.dut.get_session_output() + out = 
self.sut_node.get_session_output() except Exception as e: raise IOError("queue start/stop forward failure: %s" % e) @@ -152,9 +152,9 @@ class TestQueueStartStop(TestCase): try: # start tx queue test print("test start rx and tx queue") - self.dut.send_expect("stop", "testpmd>") - self.dut.send_expect("port 0 txq 0 start", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") + self.sut_node.send_expect("port 0 txq 0 start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.check_forwarding([0, 0], self.nic) except Exception as e: raise IOError("queue start/stop forward failure: %s" % e) @@ -166,12 +166,12 @@ class TestQueueStartStop(TestCase): patch_dst = "/tmp/" try: - self.dut.send_expect("stop", "testpmd>") - self.dut.send_expect("quit", "#") + self.sut_node.send_expect("stop", "testpmd>") + self.sut_node.send_expect("quit", "#") except: print("Failed to quit testpmd") - self.dut.kill_all() + self.sut_node.kill_all() try: self.patch_hotfix_dpdk(patch_dst + "macfwd_log.patch", False) @@ -182,7 +182,7 @@ class TestQueueStartStop(TestCase): """ Run after each test suite. 
""" - self.dut.kill_all() - self.dut.send_expect("rm -rf ./app/test-pmd/testpmd", "#") - self.dut.send_expect("rm -rf ./app/test-pmd/*.o", "#") - self.dut.send_expect("rm -rf ./app/test-pmd/build", "#") + self.sut_node.kill_all() + self.sut_node.send_expect("rm -rf ./app/test-pmd/testpmd", "#") + self.sut_node.send_expect("rm -rf ./app/test-pmd/*.o", "#") + self.sut_node.send_expect("rm -rf ./app/test-pmd/build", "#") diff --git a/tests/TestSuite_rss_key_update.py b/tests/TestSuite_rss_key_update.py index 0ce422ec..b6e6cfb6 100644 --- a/tests/TestSuite_rss_key_update.py +++ b/tests/TestSuite_rss_key_update.py @@ -54,9 +54,9 @@ class TestRssKeyUpdate(TestCase): } received_pkts = [] - self.tester.scapy_foreground() - self.dut.send_expect("start", "testpmd>") - mac = self.dut.get_mac_address(0) + self.tg_node.scapy_foreground() + self.sut_node.send_expect("start", "testpmd>") + mac = self.sut_node.get_mac_address(0) # send packet with different source and dest ip if tran_type in packet_list.keys(): @@ -66,17 +66,17 @@ class TestRssKeyUpdate(TestCase): ) for i in range(10): packet = packet_temp % (i + 1, i + 2) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) if symmetric: packet2 = packet_temp % (i + 2, i + 1) - self.tester.scapy_append(packet2) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet2) + self.tg_node.scapy_execute() time.sleep(0.5) else: print("\ntran_type error!\n") - out = self.dut.get_session_output(timeout=1) - self.dut.send_expect("stop", "testpmd>") + out = self.sut_node.get_session_output(timeout=1) + self.sut_node.send_expect("stop", "testpmd>") lines = out.split("\r\n") reta_line = {} # collect the hash result and the queue id @@ -209,40 +209,40 @@ class TestRssKeyUpdate(TestCase): else: self.verify(False, f"NIC Unsupported: {self.nic}") - cores = self.dut.get_core_list("all") + cores = self.sut_node.get_core_list("all") self.coremask = utils.create_mask(cores) - ports = self.dut.get_ports(self.nic) - 
self.ports_socket = self.dut.get_numa_id(ports[0]) + ports = self.sut_node.get_ports(self.nic) + self.ports_socket = self.sut_node.get_numa_id(ports[0]) self.verify(len(ports) >= 1, "Not enough ports available") - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) def set_up(self): """ Run before each test case. """ - dutPorts = self.dut.get_ports(self.nic) - localPort = self.tester.get_local_port(dutPorts[0]) - self.itf = self.tester.get_interface(localPort) + sutPorts = self.sut_node.get_ports(self.nic) + localPort = self.tg_node.get_local_port(sutPorts[0]) + self.itf = self.tg_node.get_interface(localPort) - self.dut.kill_all() + self.sut_node.kill_all() self.pmdout.start_testpmd("Default", f"--rxq={queue} --txq={queue}") def test_set_hash_key_toeplitz(self): - self.dut.send_expect("set verbose 8", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect(f"set nbcore {queue + 1}", "testpmd> ") + self.sut_node.send_expect("set verbose 8", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect(f"set nbcore {queue + 1}", "testpmd> ") key = "4439796BB54C5023B675EA5B124F9F30B8A2C03DDFDC4D02A08C9B334AF64A4C05C6FA343958D8557D99583AE138C92E81150366" ck = "4439796BB54C50f3B675EF5B124F9F30B8A2C0FFFFDC4D02A08C9B334FF64A4C05C6FA343958D855FFF9583AE138C92E81150FFF" # configure the reta with specific mappings. 
for i in range(reta_num): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( f"port config 0 rss reta ({i},{reta_entries[i]})", "testpmd> " ) @@ -250,24 +250,24 @@ class TestRssKeyUpdate(TestCase): self.logger.info( f"***********************{iptype} rss test********************************" ) - self.dut.send_expect( + self.sut_node.send_expect( f"port config 0 rss-hash-key {iptype} {key}", "testpmd> " ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") cmd = f"flow create 0 ingress pattern eth / ipv4 / end actions rss types {iptype} end queues end func toeplitz / end" - self.dut.send_expect(cmd, "testpmd> ") - out = self.dut.send_expect(f"port config all rss {rsstype}", "testpmd> ") + self.sut_node.send_expect(cmd, "testpmd> ") + out = self.sut_node.send_expect(f"port config all rss {rsstype}", "testpmd> ") self.verify( "error" not in out, "Configuration of RSS hash failed: Invalid argument" ) ori_output = self.send_packet(self.itf, iptype, False) - self.dut.send_expect("show port 0 rss-hash key", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("show port 0 rss-hash key", "testpmd> ") + self.sut_node.send_expect( f"port config 0 rss-hash-key {iptype} {ck}", "testpmd> " ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") cmd = f"flow create 0 ingress pattern eth / ipv4 / end actions rss types {iptype} end queues end func toeplitz / end" - self.dut.send_expect(cmd, "testpmd> ") + self.sut_node.send_expect(cmd, "testpmd> ") new_output = self.send_packet(self.itf, iptype, False) self.verify( ori_output != new_output, @@ -276,10 +276,10 @@ class TestRssKeyUpdate(TestCase): def test_set_hash_key_toeplitz_symmetric(self): - self.dut.send_expect("set verbose 8", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set promisc all off", "testpmd> ") 
- self.dut.send_expect(f"set nbcore {queue + 1}", "testpmd> ") + self.sut_node.send_expect("set verbose 8", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect(f"set nbcore {queue + 1}", "testpmd> ") key = "4439796BB54C5023B675EA5B124F9F30B8A2C03DDFDC4D02A08C9B334AF64A4C05C6FA343958D8557D99583AE138C92E81150366" ck = "4439796BB54C50f3B675EF5B124F9F30B8A2C0FFFFDC4D02A08C9B334FF64A4C05C6FA343958D855FFF9583AE138C92E81150FFF" rule_action = "func symmetric_toeplitz queues end / end" @@ -287,7 +287,7 @@ class TestRssKeyUpdate(TestCase): # configure the reta with specific mappings. for i in range(reta_num): reta_entries.insert(i, random.randint(0, queue - 1)) - self.dut.send_expect( + self.sut_node.send_expect( f"port config 0 rss reta ({i},{reta_entries[i]})", "testpmd> " ) @@ -295,36 +295,36 @@ class TestRssKeyUpdate(TestCase): self.logger.info( f"***********************{iptype} rss test********************************" ) - self.dut.send_expect( + self.sut_node.send_expect( f"port config 0 rss-hash-key {iptype} {key}", "testpmd> " ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") rule_cmd = f"flow create 0 ingress pattern eth / ipv4 / end actions rss types {iptype} end queues end {rule_action}" if "sctp" in iptype or "udp" in iptype or "tcp" in iptype: rule_cmd = rule_cmd.replace("/ ipv4 /", f"/ ipv4 / {rsstype} /") if "ipv6" in iptype: rule_cmd = rule_cmd.replace("ipv4", "ipv6") - self.dut.send_expect(rule_cmd, "testpmd> ") - out = self.dut.send_expect(f"port config all rss {rsstype}", "testpmd> ") + self.sut_node.send_expect(rule_cmd, "testpmd> ") + out = self.sut_node.send_expect(f"port config all rss {rsstype}", "testpmd> ") self.verify( "error" not in out, "configuration of rss hash failed: invalid argument" ) ori_output = self.send_packet(self.itf, iptype, True) - out = 
self.dut.send_expect("show port 0 rss-hash key", "testpmd> ") + out = self.sut_node.send_expect("show port 0 rss-hash key", "testpmd> ") self.verify("rss disable" not in out, "rss is disable!") - self.dut.send_expect( + self.sut_node.send_expect( f"port config 0 rss-hash-key {iptype} {ck}", "testpmd> " ) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") cmd = f"flow create 0 ingress pattern eth / ipv4 / end actions rss types {iptype} end queues end {rule_action}" if "sctp" in iptype or "udp" in iptype or "tcp" in iptype: cmd = cmd.replace("/ ipv4 /", f"/ ipv4 / {rsstype} /") if "ipv6" in iptype: cmd = cmd.replace("ipv4", "ipv6") - self.dut.send_expect(cmd, "testpmd> ") + self.sut_node.send_expect(cmd, "testpmd> ") new_output = self.send_packet(self.itf, iptype, True) self.verify( ori_output != new_output, @@ -360,7 +360,7 @@ class TestRssKeyUpdate(TestCase): ) # Check supported hash key size - out = self.dut.send_expect("show port info all", "testpmd> ", 120) + out = self.sut_node.send_expect("show port info all", "testpmd> ", 120) self.verify( f"Hash key size in bytes: {nic_rss_key_size[self.nic]}" in out, "not expected hash key size!", @@ -373,7 +373,7 @@ class TestRssKeyUpdate(TestCase): # config key length longer/shorter than 104 hexa-decimal numbers for key, error in test_keys.items(): - out = self.dut.send_expect( + out = self.sut_node.send_expect( f"port config 0 rss-hash-key ipv4-udp {key}", "testpmd> " ) self.verify( @@ -383,7 +383,7 @@ class TestRssKeyUpdate(TestCase): # config ket length same as 104 hex-decimal numbers and keep the config key = "4439796BB54C50f3B675EF5B124F9F30B8A2C0FFFFDC4D02A08C9B334FF64A4C05C6FA343958D855FFF9583AE138C92E81150FFF" - out = self.dut.send_expect( + out = self.sut_node.send_expect( f"port config 0 rss-hash-key ipv4-udp {key}", "testpmd> " ) @@ -397,4 +397,4 @@ class TestRssKeyUpdate(TestCase): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_rss_to_rte_flow.py b/tests/TestSuite_rss_to_rte_flow.py index 83055188..034e412c 100644 --- a/tests/TestSuite_rss_to_rte_flow.py +++ b/tests/TestSuite_rss_to_rte_flow.py @@ -12,7 +12,7 @@ Test moving RSS to rte_flow. import re import time -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder from framework.pmd_output import PmdOutput from framework.test_case import TestCase @@ -24,28 +24,28 @@ class TestRSS_to_Rteflow(TestCase): Move RSS to rte_flow Prerequisites """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.pmd_output = PmdOutput(self.dut) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.pmd_output = PmdOutput(self.sut_node) - localPort0 = self.tester.get_local_port(self.dut_ports[0]) - localPort1 = self.tester.get_local_port(self.dut_ports[1]) + localPort0 = self.tg_node.get_local_port(self.sut_ports[0]) + localPort1 = self.tg_node.get_local_port(self.sut_ports[1]) - self.tester0_itf = self.tester.get_interface(localPort0) - self.tester1_itf = self.tester.get_interface(localPort1) + self.tg0_itf = self.tg_node.get_interface(localPort0) + self.tg1_itf = self.tg_node.get_interface(localPort1) - self.tester0_mac = self.tester.get_mac(localPort0) - self.tester1_mac = self.tester.get_mac(localPort1) + self.tg0_mac = self.tg_node.get_mac(localPort0) + self.tg1_mac = self.tg_node.get_mac(localPort1) - self.pf0_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf1_interface = self.dut.ports_info[self.dut_ports[1]]["intf"] + self.pf0_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf1_interface = self.sut_node.ports_info[self.sut_ports[1]]["intf"] - self.pf0_mac = 
self.dut.get_mac_address(0) - self.pf1_mac = self.dut.get_mac_address(1) - self.pf0_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pf1_pci = self.dut.ports_info[self.dut_ports[1]]["pci"] - self.pmdout = PmdOutput(self.dut) + self.pf0_mac = self.sut_node.get_mac_address(0) + self.pf1_mac = self.sut_node.get_mac_address(1) + self.pf0_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pf1_pci = self.sut_node.ports_info[self.sut_ports[1]]["pci"] + self.pmdout = PmdOutput(self.sut_node) self.cores = "1S/2C/1T" self.pkt1 = ( "Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/SCTP(dport=80, sport=80)/('X'*48)" @@ -106,13 +106,13 @@ class TestRSS_to_Rteflow(TestCase): """ Run before each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def destroy_env(self): """ This is to stop testpmd. """ - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) def get_queue_number(self, port_id=0): @@ -127,7 +127,7 @@ class TestRSS_to_Rteflow(TestCase): m = scanner.search(outstring) queue_id = m.group(1) print(("queue is %s" % queue_id)) - self.dut.send_expect( + self.sut_node.send_expect( "clear port stats all", "NIC statistics for port 1 cleared", 20 ) return queue_id @@ -136,12 +136,12 @@ class TestRSS_to_Rteflow(TestCase): """ send packet and check the result """ - itf = self.tester0_itf if port_id == 0 else self.tester1_itf + itf = self.tg0_itf if port_id == 0 else self.tg1_itf queue_list = [] if isinstance(pkts, list): for pkt in pkts: - self.tester.scapy_append('sendp(%s, iface="%s")' % (pkt, itf)) - self.tester.scapy_execute() + self.tg_node.scapy_append('sendp(%s, iface="%s")' % (pkt, itf)) + self.tg_node.scapy_execute() queue = self.get_queue_number(port_id) self.verify( queue in rss_queue, @@ -149,8 +149,8 @@ class TestRSS_to_Rteflow(TestCase): ) queue_list.append(queue) else: - self.tester.scapy_append('sendp(%s, iface="%s")' % (pkts, itf)) - self.tester.scapy_execute() + 
self.tg_node.scapy_append('sendp(%s, iface="%s")' % (pkts, itf)) + self.tg_node.scapy_execute() queue = self.get_queue_number(port_id) self.verify( queue in rss_queue, "the packet doesn't enter the expected RSS queue." @@ -242,10 +242,10 @@ class TestRSS_to_Rteflow(TestCase): ) pkt_list.append(scapy_pkt) - pkt = packet.Packet() - pkt.update_pkt(pkt_list) - itf = self.tester0_itf if port_id == 0 else self.tester1_itf - pkt.send_pkt(self.tester, tx_port=itf) + pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() + pkt_builder.update_pkt(pkt_list) + itf = self.tg0_itf if port_id == 0 else self.tg1_itf + pkt_builder.send_pkt(self.tg_node, tx_port=itf) def check_packet_queue(self, queue, out, port_id=0): """ @@ -306,20 +306,20 @@ class TestRSS_to_Rteflow(TestCase): if isinstance(ptype_list, list): for ptype in ptype_list: self.send_packet(ptype, port_id) - out = self.dut.send_expect("stop", "testpmd> ") + out = self.sut_node.send_expect("stop", "testpmd> ") if isinstance(queue, list): self.check_packet_selected_queue(queue, out, port_id) else: self.check_packet_queue(queue, out, port_id) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) else: self.send_packet(ptype_list, port_id) - out = self.dut.send_expect("stop", "testpmd> ", 120) + out = self.sut_node.send_expect("stop", "testpmd> ", 120) if isinstance(queue, list): self.check_packet_selected_queue(queue, out, port_id) else: self.check_packet_queue(queue, out, port_id) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) def check_packet_selected_queue(self, queues, out, port_id=0): """ @@ -388,16 +388,16 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=4 --txq=4 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - out = self.dut.send_expect("start", "testpmd> 
", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + out = self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Show port default RSS functions - self.dut.send_expect( + self.sut_node.send_expect( "show port 0 rss-hash", "ipv4-frag ipv4-other ipv6-frag ipv6-other" ) - self.dut.send_expect( + self.sut_node.send_expect( "show port 1 rss-hash", "ipv4-frag ipv4-other ipv6-frag ipv6-other" ) @@ -407,63 +407,63 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue("ipv4-udp", "0", port_id=1) # Enable ipv4-udp RSS hash function on port 0 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) - self.dut.send_expect("show port 0 rss-hash", "ipv4-udp") + self.sut_node.send_expect("show port 0 rss-hash", "ipv4-udp") self.send_check_100_packet_queue("ipv4-other", "0", port_id=0) self.send_check_100_packet_queue("ipv4-other", "all", port_id=1) self.send_check_100_packet_queue("ipv4-udp", "all", port_id=0) self.send_check_100_packet_queue("ipv4-udp", "0", port_id=1) # Enable all RSS hash function on port 1 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / end actions rss types l2-payload end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / end actions rss types ipv4-other end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / end actions rss types ipv4-frag end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / 
tcp / end actions rss types ipv4-tcp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv6 / end actions rss types ipv6-other end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv6 / end actions rss types ipv6-frag end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "show port 1 rss-hash", "ipv4-frag ipv4-tcp ipv4-udp ipv4-sctp ipv4-other ipv6-frag ipv6-tcp ipv6-udp ipv6-sctp ipv6-other l2-payload sctp", ) @@ -499,47 +499,47 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue("ipv4-udp", "all", port_id=0) # Enable all RSS hash function on port 0 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / end actions rss types l2-payload end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-other end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-frag end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow 
create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-other end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-frag end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "created", ) @@ -548,9 +548,9 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list1, "all", port_id=1) # delete rule 0/2/10 of port 1 - self.dut.send_expect("flow destroy 1 rule 0", "Flow rule #0 destroyed") - self.dut.send_expect("flow destroy 1 rule 2", "Flow rule #2 destroyed") - self.dut.send_expect("flow destroy 1 rule 10", "Flow rule #10 destroyed") + self.sut_node.send_expect("flow destroy 1 rule 0", "Flow rule #0 destroyed") + self.sut_node.send_expect("flow destroy 1 rule 2", "Flow rule #2 destroyed") + self.sut_node.send_expect("flow destroy 1 rule 10", "Flow rule #10 destroyed") self.send_check_100_packet_queue(ptype_list1, "all", port_id=0) @@ -570,15 +570,15 @@ 
class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list4, "0", port_id=1) # flush all rules of port 0 - self.dut.send_expect("flow flush 0", "testpmd> ") - self.dut.send_expect("show port 0 rss-hash", "RSS disabled") + self.sut_node.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("show port 0 rss-hash", "RSS disabled") self.send_check_100_packet_queue(ptype_list1, "0", port_id=0) self.send_check_100_packet_queue(ptype_list3, "all", port_id=1) self.send_check_100_packet_queue(ptype_list4, "0", port_id=1) # flush all rules of port 1 - self.dut.send_expect("flow flush 1", "testpmd> ") - self.dut.send_expect("show port 1 rss-hash", "RSS disabled") + self.sut_node.send_expect("flow flush 1", "testpmd> ") + self.sut_node.send_expect("show port 1 rss-hash", "RSS disabled") self.send_check_100_packet_queue(ptype_list1, "0", port_id=0) self.send_check_100_packet_queue(ptype_list1, "0", port_id=1) @@ -604,17 +604,17 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=8 --txq=8 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Create a rss queue rule on port 0 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 1 4 7 end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "show port 0 rss-hash", "ipv4-frag ipv4-other ipv6-frag ipv6-other" ) @@ -624,7 +624,7 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue("ipv4-udp", "0", port_id=0) self.send_check_100_packet_queue("ipv4-udp", "0", port_id=1) # Create a rss queue rule on port 1 - 
self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern end actions rss types end queues 3 end / end", "created", ) @@ -635,7 +635,7 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue("ipv4-udp", "0", port_id=1) # Create a different rss queue rule on port 1 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern end actions rss types end queues 1 4 7 end / end", "created", ) @@ -644,8 +644,8 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue("ipv4-other", ["1", "4", "7"], port_id=1) # flush rule on port 0 - self.dut.send_expect("flow flush 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect( "show port 0 rss-hash", "ipv4-frag ipv4-other ipv6-frag ipv6-other" ) # send the packets and verify the results @@ -653,42 +653,42 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue("ipv4-other", ["1", "4", "7"], port_id=1) # Create a rss queue rule on port 0 again - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 1 4 7 end / end", "created", ) # delete rule 1 of port 1 - self.dut.send_expect("flow destroy 1 rule 1", "Flow rule #1 destroyed") + self.sut_node.send_expect("flow destroy 1 rule 1", "Flow rule #1 destroyed") # send the packets and verify the results self.send_check_100_packet_queue("ipv4-other", ["1", "4", "7"], port_id=0) self.send_check_100_packet_queue("ipv4-other", "all", port_id=1) # Create a rss queue rule on port 1 again - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern end actions rss types end queues 3 end / end", "created", ) # delete rule 0 of port 1 - self.dut.send_expect("flow destroy 1 rule 0", "Flow rule #0 destroyed") + self.sut_node.send_expect("flow destroy 1 rule 0", "Flow rule #0 destroyed") # send the packets and verify the results 
self.send_check_100_packet_queue("ipv4-other", ["1", "4", "7"], port_id=0) self.send_check_100_packet_queue("ipv4-other", ["3"], port_id=1) # flush rules of port 1 - self.dut.send_expect("flow flush 1", "testpmd> ") + self.sut_node.send_expect("flow flush 1", "testpmd> ") # send the packets and verify the results self.send_check_100_packet_queue("ipv4-other", ["1", "4", "7"], port_id=0) self.send_check_100_packet_queue("ipv4-other", "all", port_id=1) # Set all the queues to the rule - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 0 1 2 3 4 5 6 7 end / end", "created", ) self.send_check_100_packet_queue("ipv4-other", "all", port_id=0) # Set a wrong parameter: queue ID is 16 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 8 end / end", "Invalid argument", ) @@ -715,13 +715,13 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=8 --txq=8 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Create rss rules on port 0 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) @@ -731,7 +731,7 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue("ipv4-udp", "all", port_id=0) self.send_check_100_packet_queue("ipv4-udp", "0", port_id=1) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 0 2 7 end / end", "created", ) @@ -742,31 +742,31 @@ class TestRSS_to_Rteflow(TestCase): 
self.send_check_100_packet_queue("ipv4-udp", "0", port_id=1) # Create rss rules on port 1 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern end actions rss types end queues 1 4 7 end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / end actions rss types l2-payload end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv6 / end actions rss types ipv6-other end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv6 / sctp / end actions rss types ipv6-sctp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv6 / udp / end actions rss types ipv6-udp end queues end / end", "created", ) @@ -798,7 +798,7 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list3, "0", port_id=1) # Set a different RSS queue rule on port 1 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern end actions rss types end queues 3 end / end", "created", ) @@ -852,33 +852,33 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=4 --txq=4 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + 
self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Create a rss rule - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) - out1 = self.dut.send_expect("show port 0 rss-hash key", "testpmd> ", 120) + out1 = self.sut_node.send_expect("show port 0 rss-hash key", "testpmd> ", 120) rss_queue = ["0", "1", "2", "3"] port0_list1 = self.send_and_check(pkts, rss_queue, port_id=0) port1_list1 = self.send_and_check(pkts, rss_queue, port_id=1) # Create a rss key rule on port 0 key = "1234567890123456789012345678901234567890FFFFFFFFFFFF1234567890123456789012345678901234567890FFFFFFFFFFFF" - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end key %s / end" % key, "created", ) - out2 = self.dut.send_expect("show port 0 rss-hash key", "testpmd> ", 120) + out2 = self.sut_node.send_expect("show port 0 rss-hash key", "testpmd> ", 120) port0_list2 = self.send_and_check(pkts, rss_queue, port_id=0) port1_list2 = self.send_and_check(pkts, rss_queue, port_id=1) @@ -890,12 +890,12 @@ class TestRSS_to_Rteflow(TestCase): # Create a rss key rule with truncating key_len on port 0 key = "1234567890123456789012345678901234567890FFFFFFFFFFFF1234567890123456789012345678901234567890FFFFFFFFFFFF" key_len = "50" - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end key %s key_len %s / end" % (key, key_len), "created", ) - out3 = self.dut.send_expect("show port 0 rss-hash key", "testpmd> ", 120) + out3 = self.sut_node.send_expect("show port 0 rss-hash key", "testpmd> ", 120) port0_list3 = self.send_and_check(pkts, 
rss_queue, port_id=0) port1_list3 = self.send_and_check(pkts, rss_queue, port_id=1) @@ -914,12 +914,12 @@ class TestRSS_to_Rteflow(TestCase): # Create a rss rule with padding key_len on port 0 key = "1234567890123456789012345678901234567890FFFFFFFFFFFF1234567890123456789012345678901234567890FFFFFF" key_len = "52" - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end key %s key_len %s / end" % (key, key_len), "created", ) - out4 = self.dut.send_expect("show port 0 rss-hash key", "testpmd> ", 120) + out4 = self.sut_node.send_expect("show port 0 rss-hash key", "testpmd> ", 120) port0_list4 = self.send_and_check(pkts, rss_queue, port_id=0) port1_list4 = self.send_and_check(pkts, rss_queue, port_id=1) @@ -936,12 +936,12 @@ class TestRSS_to_Rteflow(TestCase): # Create a rss key rule on port 1 key = "1234567890123456789012345678901234567890FFFFFFFFFFFF1234567890123456789012345678909876543210EEEEEEEEEEEE" - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end key %s / end" % key, "created", ) - out5 = self.dut.send_expect("show port 1 rss-hash key", "testpmd> ", 120) + out5 = self.sut_node.send_expect("show port 1 rss-hash key", "testpmd> ", 120) port0_list5 = self.send_and_check(pkts, rss_queue, port_id=0) port1_list5 = self.send_and_check(pkts, rss_queue, port_id=1) @@ -975,9 +975,9 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=8 --txq=8 --disable-rss --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) 
ptype_list1 = [ "ipv4-other", @@ -996,11 +996,11 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list1, "0", port_id=1) # enable ipv4-udp and ipv6-tcp RSS function type - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types ipv6-tcp end queues end / end", "created", ) @@ -1035,7 +1035,7 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list3, "0", port_id=1) # set queue 1, 4, 7 into RSS queue rule - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 1 4 7 end / end", "created", ) @@ -1069,11 +1069,11 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list3, "0", port_id=1) # enable ipv4-udp and ipv6-other RSS function type on port 1 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 1 ingress pattern eth / ipv6 / end actions rss types ipv6-other end queues end / end", "created", ) @@ -1106,7 +1106,7 @@ class TestRSS_to_Rteflow(TestCase): ] self.send_check_100_packet_queue(ptype_list4, "0", port_id=1) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") ptype_list1 = [ "ipv4-other", "ipv4-frag", @@ -1136,7 +1136,7 @@ class TestRSS_to_Rteflow(TestCase): ] self.send_check_100_packet_queue(ptype_list3, "0", port_id=1) - self.dut.send_expect("flow flush 1", "testpmd> ") + self.sut_node.send_expect("flow flush 1", "testpmd> ") ptype_list1 = [ "ipv4-other", "ipv4-frag", @@ -1176,17 +1176,17 @@ class TestRSS_to_Rteflow(TestCase): 
self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=8 --txq=8 --pkt-filter-mode=perfect" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Enable ipv4-udp type and Create a rss queue rule - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 3 4 5 end / end", "created", ) @@ -1195,7 +1195,7 @@ class TestRSS_to_Rteflow(TestCase): self.send_and_check(self.pkt2, rss_queue, port_id=0) # Create a flow director rule - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 10.0.0.1 dst is 192.168.0.2 / udp src is 50 dst is 50 / end actions queue index 1 / end", "created", ) @@ -1203,7 +1203,7 @@ class TestRSS_to_Rteflow(TestCase): rss_queue = ["1"] self.send_and_check(self.pkt2, rss_queue, port_id=0) # There can't be more than one RSS queue rule existing. 
- self.dut.send_expect("flow destroy 0 rule 2", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 2", "testpmd> ") rss_queue = ["3", "4", "5"] self.send_and_check(self.pkt2, rss_queue, port_id=0) @@ -1229,17 +1229,17 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=16 --txq=16 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Create a rss queue rule. - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 7 8 10 11 12 14 15 end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end", "created", ) @@ -1253,15 +1253,15 @@ class TestRSS_to_Rteflow(TestCase): ) # Create three queue regions. - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern vlan tci is 0x2000 / end actions rss queues 7 8 end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern vlan tci is 0x4000 / end actions rss queues 11 12 end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern vlan tci is 0x6000 / end actions rss queues 15 end / end", "created", ) @@ -1274,7 +1274,7 @@ class TestRSS_to_Rteflow(TestCase): queue3 = self.send_and_check(self.prio_pkt3, rss_queue, port_id=0) # Destroy one queue region rule, all the rules become invalid. 
- self.dut.send_expect("flow destroy 0 rule 3", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 3", "testpmd> ") rss_queue = ["7", "8", "10", "11", "12", "14", "15"] queue_list2 = self.send_and_check(pkts, rss_queue, port_id=0) self.verify( @@ -1304,36 +1304,36 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=16 --txq=16 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern vlan tci is 0x2000 / end actions rss queues 10 11 end / end", "error", ) # Create a rss queue rule. - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 8 10 11 12 15 end / end", "created", ) # Set a queue region with invalid queue ID - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern vlan tci is 0x2000 / end actions rss queues 8 9 end / end", "error", ) # Set a queue region with discontinuous queue ID - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern vlan tci is 0x2000 / end actions rss queues 8 10 end / end", "error", ) # Set a queue region with invalid queue number - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern vlan tci is 0x4000 / end actions rss queues 10 11 12 end / end", "error", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern vlan tci is 0x2000 / end actions rss queues 10 11 end / end", "created", ) @@ -1361,31 +1361,31 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=16 
--txq=16 --port-topology=chained" ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end", "created", 120, ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Set a queue region. - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 0 queue_start_index 1 queue_num 1", "testpmd> ", ) - self.dut.send_expect( + self.sut_node.send_expect( "set port 0 queue-region region_id 0 flowtype 31", "testpmd> " ) - self.dut.send_expect("set port 0 queue-region flush on", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush on", "testpmd> ") # send the packets and verify the results rss_queue = ["1"] self.send_and_check(self.pkt2, rss_queue, port_id=0) # Create a RSS queue rule. - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types end queues 6 7 end / end", "testpmd> ", ) @@ -1394,7 +1394,7 @@ class TestRSS_to_Rteflow(TestCase): self.send_and_check(self.pkt2, rss_queue, port_id=0) # destroy the queue region. 
- self.dut.send_expect("set port 0 queue-region flush off", "testpmd> ") + self.sut_node.send_expect("set port 0 queue-region flush off", "testpmd> ") # send the packets and verify the results rss_queue = ["6", "7"] self.send_and_check(self.pkt2, rss_queue, port_id=0) @@ -1421,13 +1421,13 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=4 --txq=4 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Show port default RSS functions - self.dut.send_expect("show port 0 rss-hash", "ipv4 ipv6 ipv6-ex") + self.sut_node.send_expect("show port 0 rss-hash", "ipv4 ipv6 ipv6-ex") ptype_list1 = [ "ipv4-other", "ipv4-frag", @@ -1443,19 +1443,19 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list1, "all", port_id=0) # Disable RSS hash function - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types none end / end", "created", ) - self.dut.send_expect("show port 0 rss-hash", "RSS disabled") + self.sut_node.send_expect("show port 0 rss-hash", "RSS disabled") self.send_check_100_packet_queue(ptype_list1, "0", port_id=0) # Enable RSS hash function all - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types all end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "show port 0 rss-hash", "ipv4 ipv4-tcp ipv4-udp ipv6 ipv6-tcp ipv6-udp ipv6-ex ipv6-tcp-ex ipv6-udp-ex", ) @@ -1483,19 +1483,19 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=4 --txq=4 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", 
"testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Show port default RSS fucntions - self.dut.send_expect("show port 0 rss-hash", "ipv4 ipv6 ipv6-ex") + self.sut_node.send_expect("show port 0 rss-hash", "ipv4 ipv6 ipv6-ex") # enable ipv4-udp rss function - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types ipv4-udp end / end", "created", ) - self.dut.send_expect("show port 0 rss-hash", "ipv4-udp") + self.sut_node.send_expect("show port 0 rss-hash", "ipv4-udp") # send the packets and verify the results self.send_check_100_packet_queue("ipv4-udp", "all", port_id=0) ptype_list1 = [ @@ -1538,19 +1538,19 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=8 --txq=8 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Create a rss queue rule if self.nic in ["IGC-I225_LM", "IGB_1G-I210_COPPER"]: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss queues 1 2 end / end", "created", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss queues 1 4 7 end / end", "created", ) @@ -1573,16 +1573,16 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list1, ["1", "4", "7"], port_id=0) # There can't be more than one RSS queue rule existing. 
- self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss queues 3 end / end", "error" ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types ipv4-udp end queues 3 end / end", "error", ) # Flush the rules and create a new RSS queue rule. - self.dut.send_expect("flow flush 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types ipv4-udp end queues 3 end / end", "created", ) @@ -1601,20 +1601,20 @@ class TestRSS_to_Rteflow(TestCase): ] self.send_check_100_packet_queue(ptype_list2, "0", port_id=0) - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") # Set a wrong parameter: queue ID is 8 - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss queues 8 end / end", "error" ) # Set all the queues to the rule if self.nic in ["IGC-I225_LM", "IGB_1G-I210_COPPER"]: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss queues 0 1 2 3 end / end", "created", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss queues 0 1 2 3 4 5 6 7 end / end", "created", ) @@ -1647,23 +1647,23 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=8 --txq=8 --port-topology=chained" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Create a rss queue rule if self.nic in ["IGC-I225_LM", "IGB_1G-I210_COPPER"]: - self.dut.send_expect( + 
self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types udp ipv4-tcp ipv6-sctp ipv4-other end queues 1 2 3 end / end", "created", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types udp ipv4-tcp ipv6-sctp ipv4-other end queues 1 4 7 end / end", "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "show port 0 rss-hash", "ipv4-tcp ipv4-udp ipv6-udp ipv6-udp-ex" ) ptype_list1 = ["ipv4-udp", "ipv4-tcp", "ipv6-udp"] @@ -1688,19 +1688,19 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list2, "0", port_id=0) # Create different ptype rss rule. - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") if self.nic in ["IGC-I225_LM", "IGB_1G-I210_COPPER"]: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types ipv4 ipv6 end queues 1 3 end / end", "created", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types ipv4 ipv6 end queues 3 7 end / end", "created", ) - self.dut.send_expect("show port 0 rss-hash", "ipv4 ipv6") + self.sut_node.send_expect("show port 0 rss-hash", "ipv4 ipv6") ptype_list3 = [ "ipv4-other", "ipv4-frag", @@ -1747,9 +1747,9 @@ class TestRSS_to_Rteflow(TestCase): "%s" % self.cores, "--rxq=8 --txq=8 --disable-rss --port-topology=chained", ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) ptype_list1 = [ @@ -1767,28 +1767,28 @@ class TestRSS_to_Rteflow(TestCase): self.send_check_100_packet_queue(ptype_list1, "0", port_id=0) # Create a rss queue rule - 
self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types all end / end", "created", ) self.send_check_100_packet_queue(ptype_list1, "all", port_id=0) # Delete the rule - self.dut.send_expect("flow flush 0", "testpmd> ") + self.sut_node.send_expect("flow flush 0", "testpmd> ") self.send_check_100_packet_queue(ptype_list1, "0", port_id=0) # Create a rss queue rule if self.nic in ["IGC-I225_LM", "IGB_1G-I210_COPPER"]: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types ipv6-tcp ipv4-udp end queues 1 2 3 end / end", "created", ) else: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types ipv6-tcp ipv4-udp end queues 5 6 7 end / end", "created", ) - self.dut.send_expect("show port 0 rss-hash", "ipv4-udp ipv6-tcp") + self.sut_node.send_expect("show port 0 rss-hash", "ipv4-udp ipv6-tcp") # send the packets and verify the results ptype_list2 = ["ipv4-udp", "ipv6-tcp"] @@ -1836,20 +1836,20 @@ class TestRSS_to_Rteflow(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=8 --txq=8 --pkt-filter-mode=perfect" ) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(2) # Create a rss queue rule if self.nic in ["IGC-I225_LM", "IGB_1G-I210_COPPER"]: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types ipv4-udp end queues 2 3 end / end", "created", ) self.send_and_check(self.pkt2, ["2", "3"], port_id=0) else: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern end actions rss types ipv4-udp end queues 3 4 5 end / end", "created", ) @@ 
-1862,19 +1862,19 @@ class TestRSS_to_Rteflow(TestCase): "IGB_1G-I210_COPPER", "IGC-I225_LM", ]: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 proto is 17 / udp dst is 50 / end actions queue index 1 / end", "created", ) self.send_and_check(self.pkt2, ["1"], port_id=0) else: - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 10.0.0.1 dst is 192.168.0.2 / udp src is 50 dst is 50 / end actions queue index 1 / end", "created", ) self.send_and_check(self.pkt2, ["1"], port_id=0) # Delete the fdir rule - self.dut.send_expect("flow destroy 0 rule 1", "testpmd> ") + self.sut_node.send_expect("flow destroy 0 rule 1", "testpmd> ") if self.nic in ["IGC-I225_LM", "IGB_1G-I210_COPPER"]: self.send_and_check(self.pkt2, ["2", "3"], port_id=0) else: @@ -1885,12 +1885,12 @@ class TestRSS_to_Rteflow(TestCase): Run after each test case. """ self.destroy_env() - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_rte_flow.py b/tests/TestSuite_rte_flow.py index 8f138be5..2d5b2b6c 100644 --- a/tests/TestSuite_rte_flow.py +++ b/tests/TestSuite_rte_flow.py @@ -30,14 +30,14 @@ class RteFlow(TestCase): """ An abstraction to remove repeated code throughout the subclasses of this class """ - return self.dut.send_expect(command, "testpmd>") + return self.sut_node.send_expect(command, "testpmd>") def get_mac_address_for_port(self, port_id: int) -> str: - return self.dut.get_mac_address(port_id) + return self.sut_node.get_mac_address(port_id) def send_scapy_packet(self, port_id: int, packet: str): - itf = self.tester.get_interface(port_id) - return self.tester.send_expect( + itf = self.tg_node.get_interface(port_id) + return self.tg_node.send_expect( f'sendp({packet}, iface="{itf}")', ">>>", timeout=30 ) @@ -45,23 +45,23 @@ class RteFlow(TestCase): """ Prerequisite steps for each test suit. 
""" - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.rx_port = self.dut_ports[0] - self.tx_port = self.dut_ports[1] + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.rx_port = self.sut_ports[0] + self.tx_port = self.sut_ports[1] - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.pmdout.start_testpmd("default", "--rxq 2 --txq 2") self.exec("set verbose 3") self.exec("set fwd rxonly") - self.tester.send_expect("scapy", ">>>") + self.tg_node.send_expect("scapy", ">>>") def initialize_flow_rule(self, rule: str): - output: str = self.exec(f"flow validate {self.dut_ports[0]} {rule}") + output: str = self.exec(f"flow validate {self.sut_ports[0]} {rule}") if "Unsupported pattern" in output: return False - output = self.exec(f"flow create {self.dut_ports[0]} {rule}") + output = self.exec(f"flow create {self.sut_ports[0]} {rule}") self.verify( "created" in output or "Flow rule validated" in output, "Flow rule was not created: " + output, @@ -72,7 +72,7 @@ class RteFlow(TestCase): self, packets, pass_fail_function: Callable[[str], bool], error_message: str ): for packet in packets: - output = self.send_scapy_packet(self.dut_ports[1], packet) + output = self.send_scapy_packet(self.sut_ports[1], packet) output = self.pmdout.get_output() self.verify(pass_fail_function(output), error_message + "\r\n" + output) @@ -118,8 +118,8 @@ class RteFlow(TestCase): rule, pass_packets, fail_packets, - f"port {self.dut_ports[0]}/queue 1", - f"port {self.dut_ports[0]}/queue 0", + f"port {self.sut_ports[0]}/queue 1", + f"port {self.sut_ports[0]}/queue 0", "Error: packet went to the wrong queue", ) @@ -132,7 +132,7 @@ class RteFlow(TestCase): """ Run after each test case. 
""" - self.exec(f"flow flush {self.dut_ports[0]}") + self.exec(f"flow flush {self.sut_ports[0]}") self.exec("stop") def tear_down_all(self): @@ -140,9 +140,9 @@ class RteFlow(TestCase): When the case of this test suite finished, the environment should clear up. """ - self.tester.send_expect("exit()", "#") - self.dut.kill_all() - self.tester.kill_all() + self.tg_node.send_expect("exit()", "#") + self.sut_node.kill_all() + self.tg_node.kill_all() """ Edge Cases diff --git a/tests/TestSuite_rteflow_priority.py b/tests/TestSuite_rteflow_priority.py index 813df85c..2b15e470 100644 --- a/tests/TestSuite_rteflow_priority.py +++ b/tests/TestSuite_rteflow_priority.py @@ -18,10 +18,10 @@ from time import sleep from scapy.utils import PcapWriter, socket, struct import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream imp.reload(sys) @@ -33,13 +33,13 @@ class TestRteflowPriority(TestCase): PMD prerequisites. """ - self.dut_ports = self.dut.get_ports(self.nic) - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.__tx_iface = self.tester.get_interface(localPort) - cores = self.dut.get_core_list("1S/5C/1T") + self.sut_ports = self.sut_node.get_ports(self.nic) + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.__tx_iface = self.tg_node.get_interface(localPort) + cores = self.sut_node.get_core_list("1S/5C/1T") self.coreMask = utils.create_mask(cores) - self.portMask = utils.create_mask([self.dut_ports[0]]) - self.path = self.dut.apps_name["test-pmd"] + self.portMask = utils.create_mask([self.sut_ports[0]]) + self.path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ @@ -54,12 +54,12 @@ class TestRteflowPriority(TestCase): scapyCmds = [] def check_link(self): - # check status in test case, dut and tester both should be up. 
- self.pmd_output = PmdOutput(self.dut) + # check status in test case, SUT and TG both should be up. + self.pmd_output = PmdOutput(self.sut_node) res = self.pmd_output.wait_link_status_up("all", timeout=30) if res is True: for i in range(15): - out = self.tester.get_port_status(self.dut_ports[0]) + out = self.tg_node.get_port_status(self.sut_ports[0]) if out == "up": break else: @@ -72,8 +72,8 @@ class TestRteflowPriority(TestCase): """ Send packages and verify behavior. """ - self.tester.scapyCmds.append(cmd) - self.tester.scapy_execute() + self.tg_node.scapyCmds.append(cmd) + self.tg_node.scapy_execute() def get_packet_number(self, out, match_string): """ @@ -115,97 +115,97 @@ class TestRteflowPriority(TestCase): + '-c %s -n 4 -a %s,pipeline-mode-support=1 --log-level="ice,7" -- -i --portmask=%s --rxq=10 --txq=10' % ( self.coreMask, - self.dut.ports_info[0]["pci"], - utils.create_mask([self.dut_ports[0]]), + self.sut_node.ports_info[0]["pci"], + utils.create_mask([self.sut_ports[0]]), ) ) - out = self.dut.send_expect(command, "testpmd> ", 120) + out = self.sut_node.send_expect(command, "testpmd> ", 120) self.logger.debug(out) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) - self.dut.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 15) # create fdir and switch rules with different priority - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / tcp src is 25 dst is 23 / end actions queue index 1 / end ", "testpmd>", 15, ) self.verify("Successed" and "(2)" in out, "failed: rule map to wrong filter") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 
ingress pattern eth / ipv4 / udp / vxlan / eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / udp src is 25 dst is 23 / end actions queue index 2 / end ", "testpmd>", 15, ) self.verify("Successed" and "(2)" in out, "failed: rule map to wrong filter") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 1 ingress pattern eth / ipv4 src is 192.168.0.4 dst is 192.168.0.7 tos is 4 ttl is 20 / tcp src is 25 dst is 23 / end actions queue index 3 / end ", "testpmd>", 15, ) self.verify("Successed" and "(1)" in out, "failed: rule map to wrong filter") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 1 ingress pattern eth / ipv4 / udp / vxlan / eth / ipv4 src is 192.168.0.4 dst is 192.168.0.7 / udp src is 25 dst is 23 / end actions queue index 4 / end ", "testpmd>", 15, ) self.verify("Successed" and "(1)" in out, "failed: rule map to wrong filter") - out = self.dut.send_expect("flow list 0", "testpmd> ", 15) + out = self.sut_node.send_expect("flow list 0", "testpmd> ", 15) self.logger.info(out) self.verify("3" in out, "failed: flow rules created error") # send pkts and check the rules are written to different filter tables and the rules can work - self.dut.send_expect("start", "testpmd>", 20) + self.sut_node.send_expect("start", "testpmd>", 20) a = self.check_link() self.verify(a, "failed: link can not up") self.send_pkt( 'sendp([Ether(dst="00:00:00:00:01:00",src="11:22:33:44:55:66")/IP(src="192.168.0.2",dst="192.168.0.3",tos=4)/TCP(sport=25,dport=23)/Raw("x"*80)],iface="%s")' % (self.__tx_iface) ) - out = self.dut.send_expect("stop", "testpmd>", 20) + out = self.sut_node.send_expect("stop", "testpmd>", 20) pkt_num = self.check_queue_rx_packets_number(out, 1) self.verify(pkt_num == 1, "failed: the flow rule can not work") self.logger.info("pass: queue id is 1") - self.dut.send_expect("start", "testpmd>", 20) + self.sut_node.send_expect("start", "testpmd>", 20) self.send_pkt( 
'sendp([Ether(dst="00:00:00:00:01:00",src="11:22:33:44:55:66")/IP()/UDP()/VXLAN()/Ether()/IP(src="192.168.0.2",dst="192.168.0.3",tos=4)/UDP(sport=25,dport=23)/Raw("x"*80)],iface="%s")' % (self.__tx_iface) ) - out = self.dut.send_expect("stop", "testpmd>", 20) + out = self.sut_node.send_expect("stop", "testpmd>", 20) pkt_num = self.check_queue_rx_packets_number(out, 2) self.verify(pkt_num == 1, "failed: the flow rule can not work") self.logger.info("pass: queue id is 2") - self.dut.send_expect("start", "testpmd>", 20) + self.sut_node.send_expect("start", "testpmd>", 20) self.send_pkt( 'sendp([Ether(dst="00:00:00:00:01:00",src="11:22:33:44:55:66")/IP(src="192.168.0.4",dst="192.168.0.7",tos=4,ttl=20)/TCP(sport=25,dport=23)/Raw("x"*80)],iface="%s")' % (self.__tx_iface) ) - out = self.dut.send_expect("stop", "testpmd>", 20) + out = self.sut_node.send_expect("stop", "testpmd>", 20) pkt_num = self.check_queue_rx_packets_number(out, 3) self.verify(pkt_num == 1, "failed: the flow rule can not work") self.logger.info("pass: queue id is 3") - self.dut.send_expect("start", "testpmd>", 20) + self.sut_node.send_expect("start", "testpmd>", 20) self.send_pkt( 'sendp([Ether(dst="00:00:00:00:01:00",src="11:22:33:44:55:66")/IP()/UDP()/VXLAN()/Ether()/IP(src="192.168.0.4",dst="192.168.0.7")/UDP(sport=25,dport=23)/Raw("x"*80)],iface="%s")' % (self.__tx_iface) ) - out = self.dut.send_expect("stop", "testpmd>", 20) + out = self.sut_node.send_expect("stop", "testpmd>", 20) pkt_num = self.check_queue_rx_packets_number(out, 4) self.verify(pkt_num == 1, "failed: the flow rule can not work") self.logger.info("pass: queue id is 4") # create rules without priority, only the pattern supported by switch can be created. 
- out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 192.168.1.2 dst is 192.168.0.3 tos is 5 / tcp src is 25 dst is 23 / end actions queue index 1 / end ", "testpmd>", 15, ) self.verify("Failed" not in out, "failed: default priority 0 is not work") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 ttl is 20 / sctp src is 25 dst is 23 / end actions queue index 1 / end ", "testpmd>", 15, @@ -215,8 +215,8 @@ class TestRteflowPriority(TestCase): "failed: pattern only support by fdir can not be created in default priority", ) - self.dut.send_expect("flow flush 0", "testpmd>", 20) - self.dut.send_expect("quit", "#", 50) + self.sut_node.send_expect("flow flush 0", "testpmd>", 20) + self.sut_node.send_expect("quit", "#", 50) def test_priority_in_nonpipeline_mode(self): """ @@ -229,14 +229,14 @@ class TestRteflowPriority(TestCase): + '-c %s -n 4 -a %s --log-level="ice,7" -- -i --portmask=%s --rxq=10 --txq=10' % ( self.coreMask, - self.dut.ports_info[0]["pci"], - utils.create_mask([self.dut_ports[0]]), + self.sut_node.ports_info[0]["pci"], + utils.create_mask([self.sut_ports[0]]), ) ) - out = self.dut.send_expect(command, "testpmd> ", 120) + out = self.sut_node.send_expect(command, "testpmd> ", 120) self.logger.debug(out) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / end actions queue index 2 / mark / end", "testpmd>", 15, @@ -244,7 +244,7 @@ class TestRteflowPriority(TestCase): self.verify( "Successed" and "(1)" in out, "failed: rule can't be created to fdir" ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 1 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / end actions queue index 2 / mark / end", "testpmd>", 15, @@ -253,8 
+253,8 @@ class TestRteflowPriority(TestCase): "Failed" in out, "failed: default value of priority is 0 in non-pipeline mode", ) - self.dut.send_expect("flow flush 0", "testpmd>", 20) - self.dut.send_expect("quit", "#", 50) + self.sut_node.send_expect("flow flush 0", "testpmd>", 20) + self.sut_node.send_expect("quit", "#", 50) # restart testpmd with pipeline-mode-support=0, check the testpmd is launch in non-pipeline mode command = ( @@ -262,14 +262,14 @@ class TestRteflowPriority(TestCase): + '-c %s -n 4 -a %s,pipeline-mode-support=0 --log-level="ice,7" -- -i --portmask=%s --rxq=10 --txq=10' % ( self.coreMask, - self.dut.ports_info[0]["pci"], - utils.create_mask([self.dut_ports[0]]), + self.sut_node.ports_info[0]["pci"], + utils.create_mask([self.sut_ports[0]]), ) ) - out = self.dut.send_expect(command, "testpmd> ", 120) + out = self.sut_node.send_expect(command, "testpmd> ", 120) self.logger.debug(out) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / end actions queue index 2 / mark / end", "testpmd>", 15, @@ -277,7 +277,7 @@ class TestRteflowPriority(TestCase): self.verify( "Successed" and "(1)" in out, "failed: rule can't be created to fdir" ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 1 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / end actions queue index 2 / mark / end", "testpmd>", 15, @@ -286,8 +286,8 @@ class TestRteflowPriority(TestCase): "Failed" in out, "failed: default value of priority is 0 in non-pipeline mode", ) - self.dut.send_expect("flow flush 0", "testpmd>", 20) - self.dut.send_expect("quit", "#", 50) + self.sut_node.send_expect("flow flush 0", "testpmd>", 20) + self.sut_node.send_expect("quit", "#", 50) def test_no_destination_high_prority(self): """ @@ -300,15 +300,15 @@ class TestRteflowPriority(TestCase): + '-c %s -n 4 -a %s,pipeline-mode-support=1 
--log-level="ice,7" -- -i --portmask=%s --rxq=10 --txq=10' % ( self.coreMask, - self.dut.ports_info[0]["pci"], - utils.create_mask([self.dut_ports[0]]), + self.sut_node.ports_info[0]["pci"], + utils.create_mask([self.sut_ports[0]]), ) ) - out = self.dut.send_expect(command, "testpmd> ", 120) + out = self.sut_node.send_expect(command, "testpmd> ", 120) self.logger.debug(out) # create no destination high priority rules, check the rules can not be created. - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / tcp src is 25 dst is 23 / end actions / end", "testpmd>", 15, @@ -317,7 +317,7 @@ class TestRteflowPriority(TestCase): "Bad argument" in out, "failed: no destination high priority rule is not acceptable", ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / tcp src is 25 dst is 23 / end", "testpmd>", 15, @@ -326,7 +326,7 @@ class TestRteflowPriority(TestCase): "Bad argument" in out, "failed: no destination high priority rule is not acceptable", ) - self.dut.send_expect("quit", "#", 50) + self.sut_node.send_expect("quit", "#", 50) def test_create_fdir_rule_with_priority_0(self): """ @@ -339,27 +339,27 @@ class TestRteflowPriority(TestCase): + '-c %s -n 4 -a %s,pipeline-mode-support=1 --log-level="ice,7" -- -i --portmask=%s --rxq=10 --txq=10' % ( self.coreMask, - self.dut.ports_info[0]["pci"], - utils.create_mask([self.dut_ports[0]]), + self.sut_node.ports_info[0]["pci"], + utils.create_mask([self.sut_ports[0]]), ) ) - out = self.dut.send_expect(command, "testpmd> ", 120) + out = self.sut_node.send_expect(command, "testpmd> ", 120) self.logger.debug(out) # create rules only supported by fdir with priority 0, check the rules can not be created. 
- out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 ingress pattern eth / ipv6 src is 1111:2222:3333:4444:5555:6666:7777:8888 dst is 1111:2222:3333:4444:5555:6666:7777:9999 / sctp src is 25 dst is 23 / end actions queue index 1 / end", "testpmd>", 15, ) self.verify("Failed" in out, "failed: priority is not work") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 ttl is 20 / sctp src is 25 dst is 23 / end actions queue index 1 / end", "testpmd>", 15, ) self.verify("Failed" in out, "failed: priority is not work") - self.dut.send_expect("quit", "#", 50) + self.sut_node.send_expect("quit", "#", 50) def test_create_switch_rule_with_priority_1(self): """ @@ -372,27 +372,27 @@ class TestRteflowPriority(TestCase): + '-c %s -n 4 -a %s,pipeline-mode-support=1 --log-level="ice,7" -- -i --portmask=%s --rxq=10 --txq=10' % ( self.coreMask, - self.dut.ports_info[0]["pci"], - utils.create_mask([self.dut_ports[0]]), + self.sut_node.ports_info[0]["pci"], + utils.create_mask([self.sut_ports[0]]), ) ) - out = self.dut.send_expect(command, "testpmd> ", 120) + out = self.sut_node.send_expect(command, "testpmd> ", 120) self.logger.debug(out) # create rules only supported by switch with priority 1, check the rules can not be created. 
- out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 1 ingress pattern eth / ipv4 / nvgre / eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / end actions queue index 3 / end", "testpmd>", 15, ) self.verify("Failed" in out, "failed: priority is not work") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 1 ingress pattern eth / ipv4 / nvgre / eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / udp src is 25 dst is 23 / end actions queue index 3 / end", "testpmd>", 15, ) self.verify("Failed" in out, "failed: priority is not work") - self.dut.send_expect("quit", "#", 50) + self.sut_node.send_expect("quit", "#", 50) def test_rules_with_same_parameters_different_action(self): """ @@ -405,17 +405,17 @@ class TestRteflowPriority(TestCase): + '-c %s -n 4 -a %s,pipeline-mode-support=1 --log-level="ice,7" -- -i --portmask=%s --rxq=10 --txq=10' % ( self.coreMask, - self.dut.ports_info[0]["pci"], - utils.create_mask([self.dut_ports[0]]), + self.sut_node.ports_info[0]["pci"], + utils.create_mask([self.sut_ports[0]]), ) ) - out = self.dut.send_expect(command, "testpmd> ", 120) + out = self.sut_node.send_expect(command, "testpmd> ", 120) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) # create rules with same parameters but different action - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / tcp src is 25 dst is 23 / end actions queue index 1 / end", "testpmd>", 15, @@ -423,7 +423,7 @@ class TestRteflowPriority(TestCase): self.verify( "Successed" and "(2)" in out, "failed: switch rule can't be created" ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 
priority 1 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / tcp src is 25 dst is 23 / end actions queue index 3 / end", "testpmd>", 15, @@ -431,7 +431,7 @@ class TestRteflowPriority(TestCase): self.verify("Successed" and "(1)" in out, "failed: fdir rule can't be created") # send a pkt to check the switch rule is work for its high priority - self.dut.send_expect("start", "testpmd>", 20) + self.sut_node.send_expect("start", "testpmd>", 20) a = self.check_link() self.verify(a, "failed: link can not up") @@ -439,25 +439,25 @@ class TestRteflowPriority(TestCase): 'sendp([Ether(dst="00:00:00:00:01:00",src="11:22:33:44:55:01")/IP(src="192.168.0.2",dst="192.168.0.3",tos=4)/TCP(sport=25,dport=23)/Raw("x"*80)],iface="%s")' % (self.__tx_iface) ) - out = self.dut.send_expect("stop", "testpmd>", 20) + out = self.sut_node.send_expect("stop", "testpmd>", 20) pkt_num = self.check_queue_rx_packets_number(out, 1) self.verify(pkt_num == 1, "failed: the flow rule can not work") self.logger.info("pass: queue id is 1") # remove the switch rule and check the fdir rule is work - self.dut.send_expect("flow destroy 0 rule 0", "testpmd>", 15) - self.dut.send_expect("start", "testpmd>", 20) + self.sut_node.send_expect("flow destroy 0 rule 0", "testpmd>", 15) + self.sut_node.send_expect("start", "testpmd>", 20) self.send_pkt( 'sendp([Ether(dst="00:00:00:00:01:00",src="11:22:33:44:55:02")/IP(src="192.168.0.2",dst="192.168.0.3",tos=4)/TCP(sport=25,dport=23)/Raw("x"*80)],iface="%s")' % (self.__tx_iface) ) - out = self.dut.send_expect("stop", "testpmd>", 20) + out = self.sut_node.send_expect("stop", "testpmd>", 20) pkt_num = self.check_queue_rx_packets_number(out, 3) self.verify(pkt_num == 1, "failed: the flow rule can not work") self.logger.info("pass: queue id is 3") - self.dut.send_expect("flow flush 0", "testpmd>", 15) - self.dut.send_expect("quit", "#", 50) + self.sut_node.send_expect("flow flush 0", "testpmd>", 15) + self.sut_node.send_expect("quit", "#", 50) # 
restart testpmd in pipeline mode command = ( @@ -465,24 +465,24 @@ class TestRteflowPriority(TestCase): + '-c %s -n 4 -a %s,pipeline-mode-support=1 --log-level="ice,7" -- -i --portmask=%s --rxq=10 --txq=10' % ( self.coreMask, - self.dut.ports_info[0]["pci"], - utils.create_mask([self.dut_ports[0]]), + self.sut_node.ports_info[0]["pci"], + utils.create_mask([self.sut_ports[0]]), ) ) - out = self.dut.send_expect(command, "testpmd> ", 120) + out = self.sut_node.send_expect(command, "testpmd> ", 120) self.logger.debug(out) - self.dut.send_expect("set fwd rxonly", "testpmd> ", 15) - self.dut.send_expect("set verbose 1", "testpmd> ", 15) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ", 15) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 15) # create rules with same parameters but different action - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 1 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / tcp src is 25 dst is 23 / end actions queue index 3 / end", "testpmd>", 15, ) self.verify("Successed" and "(1)" in out, "failed: fdir rule can't be created") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 dst is 192.168.0.3 tos is 4 / tcp src is 25 dst is 23 / end actions queue index 1 / end", "testpmd>", 15, @@ -492,32 +492,32 @@ class TestRteflowPriority(TestCase): ) # send a pkt to check the switch rule is work for its high priority - self.dut.send_expect("start", "testpmd>", 20) + self.sut_node.send_expect("start", "testpmd>", 20) a = self.check_link() self.verify(a, "failed: link can not up") self.send_pkt( 'sendp([Ether(dst="00:00:00:00:01:00",src="11:22:33:44:55:01")/IP(src="192.168.0.2",dst="192.168.0.3",tos=4)/TCP(sport=25,dport=23)/Raw("x"*80)],iface="%s")' % (self.__tx_iface) ) - out = self.dut.send_expect("stop", "testpmd>", 20) + out = self.sut_node.send_expect("stop", "testpmd>", 20) pkt_num = 
self.check_queue_rx_packets_number(out, 1) self.verify(pkt_num == 1, "failed: the flow rule can not work") self.logger.info("pass: queue id is 1") # remove the switch rule and check the fdir rule is work - self.dut.send_expect("flow destroy 0 rule 1", "testpmd>", 15) - self.dut.send_expect("start", "testpmd>", 20) + self.sut_node.send_expect("flow destroy 0 rule 1", "testpmd>", 15) + self.sut_node.send_expect("start", "testpmd>", 20) self.send_pkt( 'sendp([Ether(dst="00:00:00:00:01:00",src="11:22:33:44:55:02")/IP(src="192.168.0.2",dst="192.168.0.3",tos=4)/TCP(sport=25,dport=23)/Raw("x"*80)],iface="%s")' % (self.__tx_iface) ) - out = self.dut.send_expect("stop", "testpmd>", 20) + out = self.sut_node.send_expect("stop", "testpmd>", 20) pkt_num = self.check_queue_rx_packets_number(out, 3) self.verify(pkt_num == 1, "failed: the flow rule can not work") self.logger.info("pass: queue id is 3") - self.dut.send_expect("flow flush 0", "testpmd>", 20) - self.dut.send_expect("quit", "#", 50) + self.sut_node.send_expect("flow flush 0", "testpmd>", 20) + self.sut_node.send_expect("quit", "#", 50) # ########################################################################### @@ -526,4 +526,4 @@ class TestRteflowPriority(TestCase): pass def tear_down(self): - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_runtime_vf_queue_number.py b/tests/TestSuite_runtime_vf_queue_number.py index af89ca1f..74f62f38 100644 --- a/tests/TestSuite_runtime_vf_queue_number.py +++ b/tests/TestSuite_runtime_vf_queue_number.py @@ -10,9 +10,9 @@ DPDK Test suite. 
import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput from framework.qemu_kvm import QEMUKvm +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase RSS_KEY = "6EA6A420D5138E712433B813AE45B3C4BECB2B405F31AD6C331835372D15E2D5E49566EE0ED1962AFA1B7932F3549520FD71C75E" @@ -23,20 +23,20 @@ class TestRuntimeVfQn(TestCase): supported_vf_driver = ["pci-stub", "vfio-pci"] def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.src_intf = self.tester.get_interface(self.tester.get_local_port(0)) - self.src_mac = self.tester.get_mac(self.tester.get_local_port(0)) - self.dst_mac = self.dut.get_mac_address(0) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.src_intf = self.tg_node.get_interface(self.tg_node.get_local_port(0)) + self.src_mac = self.tg_node.get_mac(self.tg_node.get_local_port(0)) + self.dst_mac = self.sut_node.get_mac_address(0) self.vm0 = None - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.used_dut_port = self.dut_ports[0] + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.used_sut_port = self.sut_ports[0] self.vf_mac = "00:11:22:33:44:55" - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) def set_up(self): - self.dut.kill_all() - self.host_testpmd = PmdOutput(self.dut) + self.sut_node.kill_all() + self.host_testpmd = PmdOutput(self.sut_node) self.setup_vm_env(driver="igb_uio") def setup_vm_env(self, driver="default"): @@ -44,8 +44,8 @@ class TestRuntimeVfQn(TestCase): setup qemu virtual environment,this is to set up 1pf and 2vfs environment, the pf can be bond to kernel driver or dpdk driver. 
""" - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] # set vf assign method and vf driver self.vf_driver = self.get_suite_cfg()["vf_driver"] @@ -56,7 +56,7 @@ class TestRuntimeVfQn(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") try: for port in self.sriov_vfs_port_0: @@ -67,10 +67,10 @@ class TestRuntimeVfQn(TestCase): vf1_prop = {"opt_host": self.sriov_vfs_port_0[1].pci} # set up VM0 ENV - self.vm0 = QEMUKvm(self.dut, "vm0", "vf_queue_number") + self.vm0 = QEMUKvm(self.sut_node, "vm0", "vf_queue_number") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") except Exception as e: self.logger.info(e) @@ -80,7 +80,7 @@ class TestRuntimeVfQn(TestCase): def destroy_vm_env(self): # destroy vm0 if getattr(self, "vm0", None) and self.vm0: - self.vm0_dut_ports = None + self.vm0_sut_ports = None self.vm0.stop() self.vm0 = None @@ -90,24 +90,24 @@ class TestRuntimeVfQn(TestCase): self.host_testpmd = None # reset used port's sriov - if getattr(self, "used_dut_port", None): - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] + if getattr(self, "used_sut_port", None): + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] port.bind_driver() - self.used_dut_port = None + self.used_sut_port = None # bind used ports with default driver - for port_id in self.dut_ports: - 
port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() def send_packet(self, vf_mac, itf, integer): """ Sends packets. """ - pkt = Packet() - pkt.generate_random_pkts(vf_mac, pktnum=integer, random_type=["IP_RAW"]) - pkt.send_pkt(self.tester, tx_port=itf) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.generate_random_pkts(vf_mac, pktnum=integer, random_type=["IP_RAW"]) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=itf) def verify_queue_number(self, outstring, qn, pkt_count): total_rx = [] @@ -148,7 +148,7 @@ class TestRuntimeVfQn(TestCase): def stop_vm0(self): if getattr(self, "vm0", None) and self.vm0: - self.vm0_dut_ports = None + self.vm0_sut_ports = None self.vm0.stop() self.vm0 = None @@ -179,8 +179,8 @@ class TestRuntimeVfQn(TestCase): "port 0: RX queue number: 1 Tx queue number: 1" in outstring, "The RX/TX queue number error.", ) - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vf_mac = self.vm0_testpmd.get_port_mac(self.vm0_dut_ports[0]) + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vf_mac = self.vm0_testpmd.get_port_mac(self.vm0_sut_ports[0]) self.send_packet(self.vf_mac, self.src_intf, 3) out = self.vm0_testpmd.get_output() self.verify( @@ -197,8 +197,8 @@ class TestRuntimeVfQn(TestCase): in outstring, "The RX/TX queue number error.", ) - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vf_mac = self.vm0_testpmd.get_port_mac(self.vm0_dut_ports[0]) + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vf_mac = self.vm0_testpmd.get_port_mac(self.vm0_sut_ports[0]) self.send_packet(self.vf_mac, self.src_intf, pkt_num) outstring1 = self.vm0_testpmd.execute_cmd("stop", "testpmd> ", 120) time.sleep(2) @@ -224,9 +224,9 @@ class TestRuntimeVfQn(TestCase): ) gest_eal_param = ( - "-a %s --file-prefix=test2" % self.vm_dut_0.ports_info[0]["pci"] + "-a %s --file-prefix=test2" % self.vm_sut_0.ports_info[0]["pci"] ) 
- self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd( self.pmdout.default_cores, eal_param=gest_eal_param, param="" ) @@ -239,8 +239,8 @@ class TestRuntimeVfQn(TestCase): in outstring, "The RX/TX queue number error.", ) - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vf_mac = self.vm0_testpmd.get_port_mac(self.vm0_dut_ports[0]) + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vf_mac = self.vm0_testpmd.get_port_mac(self.vm0_sut_ports[0]) self.send_packet(self.vf_mac, self.src_intf, PACKET_COUNT) outstring1 = self.vm0_testpmd.execute_cmd("stop", "testpmd> ") self.verify_queue_number(outstring1, qn, PACKET_COUNT) @@ -258,7 +258,7 @@ class TestRuntimeVfQn(TestCase): self.logger.info(outstring3) self.verify_queue_number(outstring3, qn + 1, PACKET_COUNT) self.vm0_testpmd.execute_cmd("quit", "# ") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def test_reserve_invalid_vf_qn(self): """ @@ -288,7 +288,7 @@ class TestRuntimeVfQn(TestCase): "it must be power of 2 and equal or less than 16" in testpmd_out, "there is no 'Wrong VF queue number = 0' logs.", ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def test_set_valid_vf_qn_in_testpmd(self): """ @@ -303,9 +303,9 @@ class TestRuntimeVfQn(TestCase): ) gest_eal_param = ( - "-a %s --file-prefix=test2" % self.vm_dut_0.ports_info[0]["pci"] + "-a %s --file-prefix=test2" % self.vm_sut_0.ports_info[0]["pci"] ) - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) for valid_qn in range(1, 17): count = valid_qn * 10 if valid_qn == 1: @@ -348,14 +348,14 @@ class TestRuntimeVfQn(TestCase): self.pmdout.default_cores, param="", eal_param=host_eal_param ) gest_eal_param = ( - "-a %s --file-prefix=test2" % self.vm_dut_0.ports_info[0]["pci"] + "-a %s --file-prefix=test2" % self.vm_sut_0.ports_info[0]["pci"] ) - self.vm0_testpmd = 
PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) - app_name = self.dut.apps_name["test-pmd"] + app_name = self.sut_node.apps_name["test-pmd"] command_0 = app_name + "-c %s -n %d %s -- -i %s" % ( "0xf", - self.dut.get_memory_channels(), + self.sut_node.get_memory_channels(), gest_eal_param, " --rxq=0 --txq=0", ) @@ -367,7 +367,7 @@ class TestRuntimeVfQn(TestCase): time.sleep(2) command_257 = app_name + "-c %s -n %d %s -- -i %s" % ( "0xf", - self.dut.get_memory_channels(), + self.sut_node.get_memory_channels(), gest_eal_param, " --rxq=257 --txq=257", ) @@ -390,9 +390,9 @@ class TestRuntimeVfQn(TestCase): ) gest_eal_param = ( - "-a %s --file-prefix=test2" % self.vm_dut_0.ports_info[0]["pci"] + "-a %s --file-prefix=test2" % self.vm_sut_0.ports_info[0]["pci"] ) - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd( self.pmdout.default_cores, eal_param=gest_eal_param, param="" ) @@ -430,9 +430,9 @@ class TestRuntimeVfQn(TestCase): self.pmdout.default_cores, param="", eal_param=host_eal_param ) gest_eal_param = ( - "-a %s --file-prefix=test2" % self.vm_dut_0.ports_info[0]["pci"] + "-a %s --file-prefix=test2" % self.vm_sut_0.ports_info[0]["pci"] ) - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd( self.pmdout.default_cores, eal_param=gest_eal_param, param="" ) @@ -462,14 +462,14 @@ class TestRuntimeVfQn(TestCase): self.host_testpmd.start_testpmd( self.pmdout.default_cores, param="", eal_param=host_eal_param ) - self.vm0_testpmd = PmdOutput(self.vm_dut_0) - self.vm_dut_0.restore_interfaces() + self.vm0_testpmd = PmdOutput(self.vm_sut_0) + self.vm_sut_0.restore_interfaces() # wait few seconds for link ready countdown = 60 while countdown: nic_info = self.vm0_testpmd.execute_cmd( "./usertools/dpdk-devbind.py -s | grep %s" - % self.vm_dut_0.ports_info[0]["pci"], + % self.vm_sut_0.ports_info[0]["pci"], expected="# ", 
) inf_str = nic_info.split("if=")[1] @@ -488,9 +488,9 @@ class TestRuntimeVfQn(TestCase): def tear_down(self): self.stop_vm0() - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def tear_down_all(self): self.destroy_vm_env() - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) diff --git a/tests/TestSuite_runtime_vf_queue_number_kernel.py b/tests/TestSuite_runtime_vf_queue_number_kernel.py index ba83717a..a4229e1d 100644 --- a/tests/TestSuite_runtime_vf_queue_number_kernel.py +++ b/tests/TestSuite_runtime_vf_queue_number_kernel.py @@ -11,8 +11,8 @@ import random import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.virt_common import VM @@ -39,8 +39,8 @@ class TestRuntimeVfQueueNumberKernel(TestCase): ], "Only supported by Intel® Ethernet 700 Series and Intel® Ethernet 800 Series", ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) > 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) > 1, "Insufficient ports") self.vm0 = None # set vf assign method and vf driver @@ -52,22 +52,22 @@ class TestRuntimeVfQueueNumberKernel(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.setup_1pf_2vf_1vm_env_flag = 0 self.setup_1pf_2vf_1vm_env(driver="") - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.portMask = utils.create_mask([self.vm0_dut_ports[0]]) - self.vm_dut_0.vm_pci0 = self.vm_dut_0.ports_info[0]["pci"] + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.portMask = 
utils.create_mask([self.vm0_sut_ports[0]]) + self.vm_sut_0.vm_pci0 = self.vm_sut_0.ports_info[0]["pci"] def set_up(self): pass def setup_1pf_2vf_1vm_env(self, driver="default"): - self.used_dut_port = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.used_sut_port = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] try: for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) @@ -77,14 +77,14 @@ class TestRuntimeVfQueueNumberKernel(TestCase): vf1_prop = {"opt_host": self.sriov_vfs_port[1].pci} if driver == "igb_uio": # start testpmd without the two VFs on the host - self.host_testpmd = PmdOutput(self.dut) + self.host_testpmd = PmdOutput(self.sut_node) self.host_testpmd.start_testpmd("Default") # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "runtime_vf_queue_number_kernel") + self.vm0 = VM(self.sut_node, "vm0", "runtime_vf_queue_number_kernel") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") self.setup_1pf_2vf_1vm_env_flag = 1 @@ -96,37 +96,37 @@ class TestRuntimeVfQueueNumberKernel(TestCase): if getattr(self, "vm0", None): # destroy testpmd in vm0 self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() self.vm0 = None if getattr(self, "host_testpmd", None): self.host_testpmd.execute_cmd("quit", "# ") self.host_testpmd = None - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = 
self.dut.ports_info[self.used_dut_port]["port"] + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] port.bind_driver() - self.used_dut_port = None + self.used_sut_port = None - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() self.setup_1pf_2vf_1vm_env_flag = 0 def send_packet2different_queue(self, dst, src, iface, count): - pkt = Packet() - pkt.generate_random_pkts( + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.generate_random_pkts( pktnum=count, random_type=["IP_RAW"], options={ "layers_config": [("ether", {"dst": "%s" % dst, "src": "%s" % src})] }, ) - pkt.send_pkt(self.tester, tx_port=iface) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=iface) def check_result(self, nr_queue, out, out2, pkts_num, count, misc): if nr_queue == 1: @@ -162,11 +162,11 @@ class TestRuntimeVfQueueNumberKernel(TestCase): random_queue = random.randint(2, 15) queue_nums = [1, random_queue, self.max_queue] for nr_queue in queue_nums: - self.vm0_testpmd = PmdOutput(self.vm_dut_0) - eal_param = "-a %(vf0)s" % {"vf0": self.vm_dut_0.vm_pci0} - tx_port = self.tester.get_local_port(self.dut_ports[0]) - tester_mac = self.tester.get_mac(tx_port) - iface = self.tester.get_interface(tx_port) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) + eal_param = "-a %(vf0)s" % {"vf0": self.vm_sut_0.vm_pci0} + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tg_mac = self.tg_node.get_mac(tx_port) + iface = self.tg_node.get_interface(tx_port) count = nr_queue * 20 times = 3 @@ -203,11 +203,11 @@ class TestRuntimeVfQueueNumberKernel(TestCase): in out, "queue number maybe error", ) - self.send_packet2different_queue(vf0_mac, tester_mac, iface, count) + self.send_packet2different_queue(vf0_mac, tg_mac, iface, count) out = 
self.vm0_testpmd.get_output() out2 = self.vm0_testpmd.execute_cmd("stop") pkts_num = out.count( - "src=%s - dst=%s" % (tester_mac.upper(), vf0_mac.upper()) + "src=%s - dst=%s" % (tg_mac.upper(), vf0_mac.upper()) ) misc = out.count("dst=FF:FF:FF:FF:FF:FF") self.logger.info("get %d boadcast misc packages " % misc) @@ -216,15 +216,15 @@ class TestRuntimeVfQueueNumberKernel(TestCase): def test_set_invalid_vf_queue_num_command_line(self): invalid_queue_num = [0, 257] for i in invalid_queue_num: - self.vm0_testpmd = PmdOutput(self.vm_dut_0) - self.vm_dut_0.session_secondary = self.vm_dut_0.new_session() - app_name = self.vm_dut_0.apps_name["test-pmd"] + self.vm0_testpmd = PmdOutput(self.vm_sut_0) + self.vm_sut_0.session_secondary = self.vm_sut_0.new_session() + app_name = self.vm_sut_0.apps_name["test-pmd"] cmd = app_name + "-c 0xf -n 1 -a %s -- -i --txq=%s --rxq=%s" % ( - self.vm_dut_0.vm_pci0, + self.vm_sut_0.vm_pci0, i, i, ) - out = self.vm_dut_0.session_secondary.send_expect(cmd, "# ", 40) + out = self.vm_sut_0.session_secondary.send_expect(cmd, "# ", 40) if i == 0: self.verify( "Either rx or tx queues should be non-zero" in out, @@ -240,18 +240,18 @@ class TestRuntimeVfQueueNumberKernel(TestCase): def test_set_valid_vf_queue_num_with_function(self): random_queue = random.randint(2, 15) queue_nums = [1, random_queue, self.max_queue] - self.vm0_testpmd = PmdOutput(self.vm_dut_0) - eal_param = "-a %(vf0)s" % {"vf0": self.vm_dut_0.vm_pci0} - tx_port = self.tester.get_local_port(self.dut_ports[0]) - tester_mac = self.tester.get_mac(tx_port) - iface = self.tester.get_interface(tx_port) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) + eal_param = "-a %(vf0)s" % {"vf0": self.vm_sut_0.vm_pci0} + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tg_mac = self.tg_node.get_mac(tx_port) + iface = self.tg_node.get_interface(tx_port) for nr_queue in queue_nums: times = 3 count = nr_queue * 20 # try tree times to make sure testpmd launched normally while times > 0: - 
self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) out = self.vm0_testpmd.start_testpmd("all", eal_param=eal_param) self.logger.info(out) self.vm0_testpmd.execute_cmd("port stop all") @@ -286,11 +286,11 @@ class TestRuntimeVfQueueNumberKernel(TestCase): in out, "queue number %s maybe error" % nr_queue, ) - self.send_packet2different_queue(vf0_mac, tester_mac, iface, count) + self.send_packet2different_queue(vf0_mac, tg_mac, iface, count) out = self.vm0_testpmd.get_output() out2 = self.vm0_testpmd.execute_cmd("stop") pkts_num = out.count( - "src=%s - dst=%s" % (tester_mac.upper(), vf0_mac.upper()) + "src=%s - dst=%s" % (tg_mac.upper(), vf0_mac.upper()) ) misc = out.count("dst=FF:FF:FF:FF:FF:FF") self.logger.info("get %d broadcast misc packages " % misc) @@ -298,8 +298,8 @@ class TestRuntimeVfQueueNumberKernel(TestCase): def test_set_invalid_vf_queue_num_with_testpmd_command(self): invalid_queue_num = [0, 257] - self.vm0_testpmd = PmdOutput(self.vm_dut_0) - eal_param = "-a %(vf0)s" % {"vf0": self.vm_dut_0.vm_pci0} + self.vm0_testpmd = PmdOutput(self.vm_sut_0) + eal_param = "-a %(vf0)s" % {"vf0": self.vm_sut_0.vm_pci0} self.vm0_testpmd.start_testpmd("all", eal_param=eal_param) self.vm0_testpmd.execute_cmd("set promisc all off") self.vm0_testpmd.execute_cmd("set fwd mac") @@ -328,7 +328,7 @@ class TestRuntimeVfQueueNumberKernel(TestCase): def tear_down(self): self.vm0_testpmd.execute_cmd("quit", "# ") - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): self.logger.info("tear_down_all") diff --git a/tests/TestSuite_runtime_vf_queue_number_maxinum.py b/tests/TestSuite_runtime_vf_queue_number_maxinum.py index 9424010e..edff070b 100644 --- a/tests/TestSuite_runtime_vf_queue_number_maxinum.py +++ b/tests/TestSuite_runtime_vf_queue_number_maxinum.py @@ -32,14 +32,14 @@ class TestRuntimeVfQnMaxinum(TestCase): ], "Only supported by Intel® Ethernet 700 Series", ) - self.dut_ports = self.dut.get_ports(self.nic) - 
self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.src_intf = self.tester.get_interface(self.tester.get_local_port(0)) - self.src_mac = self.tester.get_mac(self.tester.get_local_port(0)) - self.dst_mac = self.dut.get_mac_address(0) - self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.used_dut_port = self.dut_ports[0] - self.pmdout = PmdOutput(self.dut) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.src_intf = self.tg_node.get_interface(self.tg_node.get_local_port(0)) + self.src_mac = self.tg_node.get_mac(self.tg_node.get_local_port(0)) + self.dst_mac = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.used_sut_port = self.sut_ports[0] + self.pmdout = PmdOutput(self.sut_node) self.setup_test_env("igb_uio") def set_up(self): @@ -54,16 +54,16 @@ class TestRuntimeVfQnMaxinum(TestCase): Start testpmd based on the created vfs. """ if self.nic in ["I40E_10G-SFP_XL710"]: - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 32, driver=driver) + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 32, driver=driver) elif self.nic in [ "I40E_25G-25G_SFP28", "I40E_40G-QSFP_A", "I40E_10G-SFP_X722", "I40E_10G-10G_BASE_T_X722", ]: - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 64, driver=driver) + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 64, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] # set vf assign method and vf driver self.vf_driver = self.get_suite_cfg()["vf_driver"] @@ -73,11 +73,11 @@ class TestRuntimeVfQnMaxinum(TestCase): for port in self.sriov_vfs_port_0: port.bind_driver(self.vf_driver) - self.vf1_session = self.dut.new_session() - self.vf2_session = self.dut.new_session() - self.pf_pmdout = PmdOutput(self.dut) - self.vf1_pmdout = 
PmdOutput(self.dut, self.vf1_session) - self.vf2_pmdout = PmdOutput(self.dut, self.vf2_session) + self.vf1_session = self.sut_node.new_session() + self.vf2_session = self.sut_node.new_session() + self.pf_pmdout = PmdOutput(self.sut_node) + self.vf1_pmdout = PmdOutput(self.sut_node, self.vf1_session) + self.vf2_pmdout = PmdOutput(self.sut_node, self.vf2_session) def destroy_test_env(self): if getattr(self, "pf_pmdout", None): @@ -88,32 +88,32 @@ class TestRuntimeVfQnMaxinum(TestCase): self.vf1_pmdout.execute_cmd("quit", "# ", timeout=200) self.vf1_pmdout = None if getattr(self, "vf1_session", None): - self.dut.close_session(self.vf1_session) + self.sut_node.close_session(self.vf1_session) if getattr(self, "vf2_pmdout", None): self.vf2_pmdout.execute_cmd("quit", "# ") self.vf2_pmdout = None if getattr(self, "vf2_session", None): - self.dut.close_session(self.vf2_session) + self.sut_node.close_session(self.vf2_session) if getattr(self, "vf3_pmdout", None): self.vf3_pmdout.execute_cmd("quit", "# ", timeout=150) self.vf3_pmdout = None if getattr(self, "vf3_session", None): - self.dut.close_session(self.vf3_session) + self.sut_node.close_session(self.vf3_session) # reset used port's sriov - if getattr(self, "used_dut_port", None): - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] + if getattr(self, "used_sut_port", None): + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] port.bind_driver() - self.used_dut_port = None + self.used_sut_port = None def send_packet(self, dest_mac, itf, count): """ Sends packets. 
""" - self.tester.scapy_foreground() + self.tg_node.scapy_foreground() time.sleep(2) for i in range(count): quotient = i // 254 @@ -124,8 +124,8 @@ class TestRuntimeVfQnMaxinum(TestCase): dest_mac, itf, quotient, remainder + 1, itf ) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(2) def test_vf_consume_max_queues_on_one_pf(self): @@ -166,8 +166,8 @@ class TestRuntimeVfQnMaxinum(TestCase): if vf1_allow_index > self.max_allow_per_testpmd: vf3_allow_index = vf1_allow_index % self.max_allow_per_testpmd vf1_allow_index = vf1_allow_index - vf3_allow_index - self.vf3_session = self.dut.new_session() - self.vf3_pmdout = PmdOutput(self.dut, self.vf3_session) + self.vf3_session = self.sut_node.new_session() + self.vf3_pmdout = PmdOutput(self.sut_node, self.vf3_session) self.logger.info( "vf2_queue_number: {}, vf3_allow_index: {}".format( @@ -268,7 +268,7 @@ class TestRuntimeVfQnMaxinum(TestCase): ) # send packets to vf1/vf2, and check all the queues could receive packets - # as set promisc on, packets send by tester could be received by both vf1 and vf2 + # as set promisc on, packets send by TG could be received by both vf1 and vf2 self.vf1_pmdout.execute_cmd("set promisc all on") if vf2_queue_number > 0: self.vf2_pmdout.execute_cmd("set promisc all on") diff --git a/tests/TestSuite_rxtx_callbacks.py b/tests/TestSuite_rxtx_callbacks.py index 3fdef4de..409a950f 100644 --- a/tests/TestSuite_rxtx_callbacks.py +++ b/tests/TestSuite_rxtx_callbacks.py @@ -19,16 +19,16 @@ class TestRxtxCallbacks(TestCase): Run at the start of each test suite. 
timer prerequistites """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") - cores = self.dut.get_core_list("1S/2C/1T") + cores = self.sut_node.get_core_list("1S/2C/1T") self.coremask = utils.create_mask(cores) - self.mac = self.dut.get_mac_address(self.dut_ports[0]) - self.app_rxtx_callbacks_path = self.dut.apps_name["rxtx_callbacks"] + self.mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.app_rxtx_callbacks_path = self.sut_node.apps_name["rxtx_callbacks"] - out = self.dut.build_dpdk_apps("./examples/rxtx_callbacks") + out = self.sut_node.build_dpdk_apps("./examples/rxtx_callbacks") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -39,18 +39,18 @@ class TestRxtxCallbacks(TestCase): pass def test_rxtx_callbacks(self): - eal_para = self.dut.create_eal_parameters(cores="1S/2C/1T") + eal_para = self.sut_node.create_eal_parameters(cores="1S/2C/1T") cmd = self.app_rxtx_callbacks_path + " %s" % eal_para - self.dut.send_expect(cmd, "forwarding packets", 60) + self.sut_node.send_expect(cmd, "forwarding packets", 60) - self.iface_port0 = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.iface_port0 = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - self.iface_port1 = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[1]) + self.iface_port1 = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[1]) ) - self.inst_port1 = self.tester.tcpdump_sniff_packets(self.iface_port1) + self.inst_port1 = self.tg_node.tcpdump_sniff_packets(self.iface_port1) self.scapy_send_packet(self.iface_port0) out_port1 = self.get_tcpdump_package(self.inst_port1) @@ -62,14 +62,14 @@ class TestRxtxCallbacks(TestCase): """ Send a packet to port """ 
- self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(dst="%s")/IP()/UDP()/Raw(\'X\'*18)], iface="%s", count=4)' % (self.mac, iface) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def get_tcpdump_package(self, inst): - pkts = self.tester.load_tcpdump_sniff_packets(inst) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst) dsts = [] for i in range(len(pkts)): dst = pkts.strip_element_layer2("dst", p_index=i) @@ -80,7 +80,7 @@ class TestRxtxCallbacks(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) def tear_down_all(self): diff --git a/tests/TestSuite_rxtx_offload.py b/tests/TestSuite_rxtx_offload.py index 3f56993e..4310d8f4 100644 --- a/tests/TestSuite_rxtx_offload.py +++ b/tests/TestSuite_rxtx_offload.py @@ -13,11 +13,10 @@ import re import time import framework.utils as utils -from framework.dut import Dut -from framework.packet import Packet from framework.pmd_output import PmdOutput -from framework.project_dpdk import DPDKdut +from framework.project_dpdk import DPDKSut from framework.settings import DRIVERS, HEADER_SIZE +from framework.sut_node import SutNode from framework.test_case import TestCase ETHER_STANDARD_MTU = 1518 @@ -81,20 +80,20 @@ class TestRxTx_Offload(TestCase): "NIC Unsupported: " + str(self.nic), ) # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - - localPort0 = self.tester.get_local_port(self.dut_ports[0]) - localPort1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_itf0 = self.tester.get_interface(localPort0) - self.tester_itf1 = self.tester.get_interface(localPort1) - - self.tester_mac0 = self.tester.get_mac(localPort0) - self.pf_interface = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pf_mac = self.dut.get_mac_address(0) - 
self.pf_pci = self.dut.ports_info[self.dut_ports[0]]["pci"] - self.pmdout = PmdOutput(self.dut) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + + localPort0 = self.tg_node.get_local_port(self.sut_ports[0]) + localPort1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_itf0 = self.tg_node.get_interface(localPort0) + self.tg_itf1 = self.tg_node.get_interface(localPort1) + + self.tg_mac0 = self.tg_node.get_mac(localPort0) + self.pf_interface = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pf_mac = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"] + self.pmdout = PmdOutput(self.sut_node) self.cores = "1S/4C/1T" def set_up(self): @@ -104,13 +103,13 @@ class TestRxTx_Offload(TestCase): pass def verify_link_up(self, max_delay=10): - ports = self.dut.get_ports(self.nic) + ports = self.sut_node.get_ports(self.nic) for port_id in range(len(ports)): - out = self.dut.send_expect("show port info %s" % port_id, "testpmd> ") + out = self.sut_node.send_expect("show port info %s" % port_id, "testpmd> ") port_time_up = 0 while (port_time_up <= max_delay) and ("Link status: down" in out): time.sleep(1) - out = self.dut.send_expect("show port info %s" % port_id, "testpmd> ") + out = self.sut_node.send_expect("show port info %s" % port_id, "testpmd> ") port_time_up += 1 self.verify( "Link status: down" not in out, @@ -124,11 +123,11 @@ class TestRxTx_Offload(TestCase): global offloads offload_keys = [] if rxtx == "rx": - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "show port 0 rx_offload capabilities", "testpmd> " ) elif rxtx == "tx": - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "show port 0 tx_offload capabilities", "testpmd> " ) @@ -150,16 +149,16 @@ class TestRxTx_Offload(TestCase): """ global offloads - result_config = self.dut.send_expect("port start %d" % port_id, "testpmd> ") + result_config = self.sut_node.send_expect("port start 
%d" % port_id, "testpmd> ") self.verify("Fail" not in result_config, "Fail to configure port") self.verify_link_up(20) if rxtx == "rx": - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "show port %d rx_offload configuration" % port_id, "testpmd> " ) elif rxtx == "tx": - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "show port %d tx_offload configuration" % port_id, "testpmd> " ) @@ -192,17 +191,17 @@ class TestRxTx_Offload(TestCase): """ global offloads - result_config = self.dut.send_expect("port start 0", "testpmd> ") + result_config = self.sut_node.send_expect("port start 0", "testpmd> ") self.verify("Fail" not in result_config, "Fail to configure port") self.verify_link_up(20) acl_offloads = [] if rxtx == "rx": - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "show port 0 rx_offload configuration", "testpmd> " ) elif rxtx == "tx": - outstring = self.dut.send_expect( + outstring = self.sut_node.send_expect( "show port 0 tx_offload configuration", "testpmd> " ) @@ -229,13 +228,13 @@ class TestRxTx_Offload(TestCase): def verify_packets_increasing(self, rxtx="tx", count=2): # Verify RXTX-packets increasing on each ports, check count < 25 - out1 = self.dut.send_expect("show port stats all", "testpmd> ") + out1 = self.sut_node.send_expect("show port stats all", "testpmd> ") i = 0 while i < count: if rxtx == "tx": pks_l1 = re.findall(r"TX-packets: (\w+)", out1) time.sleep(15) - out1 = self.dut.send_expect("show port stats all", "testpmd> ") + out1 = self.sut_node.send_expect("show port stats all", "testpmd> ") pks_l2 = re.findall(r"TX-packets: (\w+)", out1) self.logger.info( "Times=" @@ -262,15 +261,15 @@ class TestRxTx_Offload(TestCase): """ Send packet and get the queue which packet enter. 
""" - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.verify_link_up(20) - self.tester.scapy_foreground() - self.tester.scapy_append(packet) - self.tester.scapy_execute() - outstring = self.dut.send_expect("stop", "testpmd> ") + self.tg_node.scapy_foreground() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() + outstring = self.sut_node.send_expect("stop", "testpmd> ") self.verify("Queue" in outstring, "the packet is not received.") result_scanner = ( - r"Forward Stats for RX Port= %s/Queue=\s?([0-9]+)" % self.dut_ports[0] + r"Forward Stats for RX Port= %s/Queue=\s?([0-9]+)" % self.sut_ports[0] ) scanner = re.compile(result_scanner, re.DOTALL) m = scanner.search(outstring) @@ -282,13 +281,13 @@ class TestRxTx_Offload(TestCase): """ Sends packets and check the flag. """ - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") self.verify_link_up(20) - self.tester.scapy_foreground() - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_foreground() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(2) - outstring = self.dut.get_session_output(timeout=1) + outstring = self.sut_node.get_session_output(timeout=1) # get queue ID result_scanner = r"RSS queue=0x([0-9]+)" scanner = re.compile(result_scanner, re.DOTALL) @@ -304,17 +303,17 @@ class TestRxTx_Offload(TestCase): "RTE_MBUF_F_RX_VLAN_STRIPPED" not in outstring, "Fail to configure offload by queue.", ) - self.dut.send_expect("stop", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") def checksum_valid_flags(self, packet, direction, flags): """ Sends packets and check the checksum valid-flags. 
""" - self.dut.send_expect("start", "testpmd>") - self.tester.scapy_foreground() - self.tester.scapy_append(packet) - self.tester.scapy_execute() - out = self.dut.get_session_output(timeout=1) + self.sut_node.send_expect("start", "testpmd>") + self.tg_node.scapy_foreground() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() + out = self.sut_node.get_session_output(timeout=1) lines = out.split("\r\n") # collect the rx checksum result @@ -375,29 +374,29 @@ class TestRxTx_Offload(TestCase): and ("RTE_MBUF_F_TX_IP_CKSUM" not in line), "The tx checksum flag is wrong!", ) - self.dut.send_expect("stop", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") def verify_result(self, packet, expect_rxpkts, expect_queue): """ verify if the packet is to the expected queue """ - result_config = self.dut.send_expect("port start 0", "testpmd> ") + result_config = self.sut_node.send_expect("port start 0", "testpmd> ") self.verify("Fail" not in result_config, "Fail to configure port") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.verify_link_up(20) - self.tester.scapy_foreground() - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_foreground() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(2) - outstring = self.dut.send_expect("stop", "testpmd> ", 120) + outstring = self.sut_node.send_expect("stop", "testpmd> ", 120) time.sleep(2) if expect_rxpkts == 0: self.verify("Queue" not in outstring, "the packet is still received.") else: result_scanner = ( - r"Forward Stats for RX Port= %s/Queue=\s?([0-9]+)" % self.dut_ports[0] + r"Forward Stats for RX Port= %s/Queue=\s?([0-9]+)" % self.sut_ports[0] ) scanner = re.compile(result_scanner, re.DOTALL) m = scanner.search(outstring) @@ -409,15 +408,15 @@ class TestRxTx_Offload(TestCase): def start_tcpdump(self, rxItf): - self.tester.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") - 
self.tester.send_expect( + self.tg_node.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") + self.tg_node.send_expect( "tcpdump -i %s -w ./getPackageByTcpdump.cap 2> /dev/null& " % rxItf, "#" ) def get_tcpdump_package(self): - self.tester.send_expect("killall tcpdump", "#") - return self.tester.send_expect( + self.tg_node.send_expect("killall tcpdump", "#") + return self.tg_node.send_expect( "tcpdump -nn -e -v -c 1024 -r ./getPackageByTcpdump.cap", "#", 120 ) @@ -428,27 +427,27 @@ class TestRxTx_Offload(TestCase): # Define rx checksum packets self.rxcksum_pkt1 = ( r'sendp([Ether(dst="%s")/IP(dst="192.168.0.1")/TCP(sport=33, dport=34)/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) self.rxcksum_pkt2 = ( r'sendp([Ether(dst="%s")/IP(chksum=0x0)/TCP(chksum=0xf)/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) self.rxcksum_pkt3 = ( r'sendp([Ether(dst="%s")/IP(dst="192.168.0.1")/UDP(chksum=0xf)/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) self.rxcksum_pkt4 = ( r'sendp([Ether(dst="%s")/IP(chksum=0x0)/UDP(sport=33, dport=34)/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) flags = [] self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=4 --txq=4 --enable-rx-cksum" ) - self.dut.send_expect("set fwd csum", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd csum", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") offload = ["ipv4_cksum", "udp_cksum", "tcp_cksum"] self.check_port_config("rx", offload) @@ -459,29 +458,29 @@ class TestRxTx_Offload(TestCase): self.checksum_valid_flags(self.rxcksum_pkt4, "rx", ["ipv4"]) # Disable the rx cksum per_port - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 rx_offload udp_cksum off", "testpmd> ") + self.sut_node.send_expect("port 
stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 rx_offload udp_cksum off", "testpmd> ") offload = ["ipv4_cksum", "tcp_cksum"] self.check_port_config("rx", offload) - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 rx_offload ipv4_cksum off", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 rx_offload ipv4_cksum off", "testpmd> ") offload = ["tcp_cksum"] self.check_port_config("rx", offload) - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 rx_offload tcp_cksum off", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 rx_offload tcp_cksum off", "testpmd> ") self.check_port_config("rx", "NULL") # Enable the rx cksum per_port - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 rx_offload ipv4_cksum on", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 rx_offload ipv4_cksum on", "testpmd> ") offload = ["ipv4_cksum"] self.check_port_config("rx", offload) - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 rx_offload tcp_cksum on", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 rx_offload tcp_cksum on", "testpmd> ") offload = ["ipv4_cksum", "tcp_cksum"] self.check_port_config("rx", offload) - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 rx_offload udp_cksum on", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 rx_offload udp_cksum on", "testpmd> ") offload = ["ipv4_cksum", "tcp_cksum", "udp_cksum"] self.check_port_config("rx", offload) @@ -498,14 +497,14 @@ class TestRxTx_Offload(TestCase): ): continue if capability != "jumboframe": - 
self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect( "port config 0 rx_offload %s on" % capability, "testpmd> " ) offload = [capability] self.check_port_config("rx", offload) - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect( "port config 0 rx_offload %s off" % capability, "testpmd> " ) self.check_port_config("rx", "NULL") @@ -529,24 +528,24 @@ class TestRxTx_Offload(TestCase): # Define the vlan packets self.vlan_pkt1 = ( r'sendp([Ether(dst="%s")/Dot1Q(vlan=1)/IP(src="192.168.0.1",dst="192.168.0.3")/UDP(sport=33, dport=34)/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) self.vlan_pkt2 = ( r'sendp([Ether(dst="%s")/Dot1Q(vlan=1)/IP(src="192.168.0.2",dst="192.168.0.3")/UDP(sport=33, dport=34)/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) self.pmdout.start_testpmd("%s" % self.cores, "--rxq=4 --txq=4") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") offload = ["NULL", "NULL", "NULL", "NULL"] self.check_queue_config("rx", offload) # Enable vlan_strip per_queue. 
- self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port 0 rxq 0 rx_offload vlan_strip on", "testpmd> ") - self.dut.send_expect("port 0 rxq 2 rx_offload vlan_strip on", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port 0 rxq 0 rx_offload vlan_strip on", "testpmd> ") + self.sut_node.send_expect("port 0 rxq 2 rx_offload vlan_strip on", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ") queue = [0, 2] self.check_flag(self.vlan_pkt1, queue) self.check_flag(self.vlan_pkt2, queue) @@ -554,10 +553,10 @@ class TestRxTx_Offload(TestCase): self.check_queue_config("rx", offload) # Disable vlan_strip per_queue - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port 0 rxq 3 rx_offload vlan_strip on", "testpmd> ") - self.dut.send_expect("port 0 rxq 2 rx_offload vlan_strip off", "testpmd> ") - self.dut.send_expect("port start 0", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port 0 rxq 3 rx_offload vlan_strip on", "testpmd> ") + self.sut_node.send_expect("port 0 rxq 2 rx_offload vlan_strip off", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ") queue = [0, 3] self.check_flag(self.vlan_pkt1, queue) self.check_flag(self.vlan_pkt2, queue) @@ -565,8 +564,8 @@ class TestRxTx_Offload(TestCase): self.check_queue_config("rx", offload) # Enable vlan_strip per_port - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 rx_offload vlan_strip on", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 rx_offload vlan_strip on", "testpmd> ") offload = ["vlan_strip"] self.check_port_config("rx", offload) queue = [0, 1, 2, 3] @@ -574,8 +573,8 @@ class TestRxTx_Offload(TestCase): self.check_flag(self.vlan_pkt2, queue) # Disable vlan_strip per_port - 
self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 rx_offload vlan_strip off", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 rx_offload vlan_strip off", "testpmd> ") self.check_port_config("rx", "NULL") queue = [] self.check_flag(self.vlan_pkt1, queue) @@ -588,8 +587,8 @@ class TestRxTx_Offload(TestCase): self.pmdout.start_testpmd( "%s" % self.cores, "--rxq=4 --txq=4 --port-topology=loop" ) - self.dut.send_expect("set fwd txonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd txonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") if self.nic in [ "I40E_10G-SFP_XL710", "I40E_40G-QSFP_A", @@ -603,36 +602,36 @@ class TestRxTx_Offload(TestCase): "ICE_100G-E810C_QSFP", "ICE_25G-E810C_SFP", ]: - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect( "port config 0 tx_offload mbuf_fast_free off", "testpmd> " ) self.check_port_config("tx", "NULL") - self.start_tcpdump(self.tester_itf0) - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("stop", "testpmd> ") + self.start_tcpdump(self.tg_itf0) + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") out = self.get_tcpdump_package() self.verify("vlan" not in out, "There is vlan insert.") # Enable vlan_insert per_port - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 tx_offload vlan_insert on", "testpmd> ") - self.dut.send_expect("tx_vlan set 0 1", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 tx_offload vlan_insert on", "testpmd> ") + self.sut_node.send_expect("tx_vlan set 0 1", "testpmd> ") offload = ["vlan_insert"] self.check_port_config("tx", offload) - 
self.start_tcpdump(self.tester_itf0) - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("stop", "testpmd> ") + self.start_tcpdump(self.tg_itf0) + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") out = self.get_tcpdump_package() self.verify("vlan 1" in out, "There is no vlan insert.") # Disable vlan_insert per_port - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 tx_offload vlan_insert off", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 tx_offload vlan_insert off", "testpmd> ") self.check_port_config("tx", "NULL") - self.start_tcpdump(self.tester_itf0) - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("stop", "testpmd> ") + self.start_tcpdump(self.tg_itf0) + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") out = self.get_tcpdump_package() self.verify("vlan" not in out, "There is vlan insert.") @@ -644,8 +643,8 @@ class TestRxTx_Offload(TestCase): "%s" % self.cores, "--rxq=4 --txq=4 --port-topology=loop --tx-offloads=0x0001", ) - self.dut.send_expect("set fwd txonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd txonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") if self.nic in [ "I40E_10G-SFP_XL710", "I40E_40G-QSFP_A", @@ -657,63 +656,63 @@ class TestRxTx_Offload(TestCase): "ICE_100G-E810C_QSFP", "ICE_25G-E810C_SFP", ]: - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect( "port config 0 tx_offload mbuf_fast_free off", "testpmd> " ) offload = ["vlan_insert"] self.check_port_config("tx", offload) - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("tx_vlan set 0 1", "testpmd> ") - self.dut.send_expect("port start 0", 
"testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("tx_vlan set 0 1", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ") self.verify_link_up(20) - self.start_tcpdump(self.tester_itf0) - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("stop", "testpmd> ") + self.start_tcpdump(self.tg_itf0) + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") out = self.get_tcpdump_package() self.verify("vlan 1" in out, "There is no vlan insert.") # Disable vlan_insert per_queue - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port 0 txq 0 tx_offload vlan_insert off", "testpmd> ") - self.dut.send_expect("port 0 txq 1 tx_offload vlan_insert off", "testpmd> ") - self.dut.send_expect("port 0 txq 2 tx_offload vlan_insert off", "testpmd> ") - self.dut.send_expect("port 0 txq 3 tx_offload vlan_insert off", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port 0 txq 0 tx_offload vlan_insert off", "testpmd> ") + self.sut_node.send_expect("port 0 txq 1 tx_offload vlan_insert off", "testpmd> ") + self.sut_node.send_expect("port 0 txq 2 tx_offload vlan_insert off", "testpmd> ") + self.sut_node.send_expect("port 0 txq 3 tx_offload vlan_insert off", "testpmd> ") offload = ["vlan_insert"] self.check_port_config("tx", offload) - self.start_tcpdump(self.tester_itf0) - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("stop", "testpmd> ") + self.start_tcpdump(self.tg_itf0) + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") out = self.get_tcpdump_package() self.verify("vlan 1" in out, "There is no vlan insert.") # Disable vlan_insert per_port - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 tx_offload vlan_insert off", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + 
self.sut_node.send_expect("port config 0 tx_offload vlan_insert off", "testpmd> ") self.check_port_config("tx", "NULL") - self.start_tcpdump(self.tester_itf0) - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("stop", "testpmd> ") + self.start_tcpdump(self.tg_itf0) + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") out = self.get_tcpdump_package() self.verify("vlan" not in out, "There is vlan insert.") # Enable vlan_insert per_queue - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port 0 txq 0 tx_offload vlan_insert on", "testpmd> ") - self.dut.send_expect("port 0 txq 1 tx_offload vlan_insert on", "testpmd> ") - self.dut.send_expect("port 0 txq 2 tx_offload vlan_insert on", "testpmd> ") - self.dut.send_expect("port 0 txq 3 tx_offload vlan_insert on", "testpmd> ") - outstring = self.dut.send_expect("port start 0", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port 0 txq 0 tx_offload vlan_insert on", "testpmd> ") + self.sut_node.send_expect("port 0 txq 1 tx_offload vlan_insert on", "testpmd> ") + self.sut_node.send_expect("port 0 txq 2 tx_offload vlan_insert on", "testpmd> ") + self.sut_node.send_expect("port 0 txq 3 tx_offload vlan_insert on", "testpmd> ") + outstring = self.sut_node.send_expect("port start 0", "testpmd> ") self.verify("Fail" in outstring, "vlan_insert can be set by queue.") # Enable vlan_insert per_port - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 tx_offload vlan_insert on", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 tx_offload vlan_insert on", "testpmd> ") offload = ["vlan_insert"] self.check_port_config("tx", offload) - self.start_tcpdump(self.tester_itf0) - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("stop", "testpmd> ") + self.start_tcpdump(self.tg_itf0) + 
self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") out = self.get_tcpdump_package() self.verify("vlan 1" in out, "There is no vlan insert.") @@ -724,25 +723,25 @@ class TestRxTx_Offload(TestCase): # Define tx checksum packets self.txcksum_ipv4 = ( r'sendp([Ether(dst="%s")/IP(dst="192.168.0.1")/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) self.txcksum_tcp = ( r'sendp([Ether(dst="%s")/IP(dst="192.168.0.1")/TCP(sport=33, dport=34)/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) self.txcksum_udp = ( r'sendp([Ether(dst="%s")/IP(dst="192.168.0.1")/UDP(sport=33, dport=34)/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) self.txcksum_sctp = ( r'sendp([Ether(dst="%s")/IP(dst="192.168.0.1")/SCTP(sport=33, dport=34)/Raw("x"*20)], iface="%s")' - % (self.pf_mac, self.tester_itf0) + % (self.pf_mac, self.tg_itf0) ) flags = [] self.pmdout.start_testpmd("%s" % self.cores, "--rxq=4 --txq=4") - self.dut.send_expect("set fwd csum", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd csum", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") # Check the tx checksum flags self.checksum_valid_flags(self.txcksum_ipv4, "tx", []) @@ -751,28 +750,28 @@ class TestRxTx_Offload(TestCase): self.checksum_valid_flags(self.txcksum_sctp, "tx", []) # Disable the tx cksum per_port - self.dut.send_expect("port stop 1", "testpmd> ") - self.dut.send_expect("port config 1 tx_offload ipv4_cksum on", "testpmd> ") - self.dut.send_expect("port start 1", "testpmd> ") + self.sut_node.send_expect("port stop 1", "testpmd> ") + self.sut_node.send_expect("port config 1 tx_offload ipv4_cksum on", "testpmd> ") + self.sut_node.send_expect("port start 1", "testpmd> ") offload = ["ipv4_cksum"] self.check_port_config("tx", offload, 1) 
self.checksum_valid_flags(self.txcksum_ipv4, "tx", ["ipv4"]) - self.dut.send_expect("port stop 1", "testpmd> ") - self.dut.send_expect("port config 1 tx_offload tcp_cksum on", "testpmd> ") - self.dut.send_expect("port start 1", "testpmd> ") + self.sut_node.send_expect("port stop 1", "testpmd> ") + self.sut_node.send_expect("port config 1 tx_offload tcp_cksum on", "testpmd> ") + self.sut_node.send_expect("port start 1", "testpmd> ") offload = ["ipv4_cksum", "tcp_cksum"] self.check_port_config("tx", offload, 1) self.checksum_valid_flags(self.txcksum_tcp, "tx", ["ipv4", "tcp"]) - self.dut.send_expect("port stop 1", "testpmd> ") - self.dut.send_expect("port config 1 tx_offload udp_cksum on", "testpmd> ") + self.sut_node.send_expect("port stop 1", "testpmd> ") + self.sut_node.send_expect("port config 1 tx_offload udp_cksum on", "testpmd> ") offload = ["ipv4_cksum", "tcp_cksum", "udp_cksum"] self.check_port_config("tx", offload, 1) self.checksum_valid_flags(self.txcksum_udp, "tx", ["ipv4", "udp"]) - self.dut.send_expect("port stop 1", "testpmd> ") - self.dut.send_expect("port config 1 tx_offload sctp_cksum on", "testpmd> ") + self.sut_node.send_expect("port stop 1", "testpmd> ") + self.sut_node.send_expect("port config 1 tx_offload sctp_cksum on", "testpmd> ") offload = ["ipv4_cksum", "tcp_cksum", "udp_cksum", "sctp_cksum"] self.check_port_config("tx", offload, 1) self.checksum_valid_flags(self.txcksum_sctp, "tx", ["ipv4", "sctp"]) @@ -795,20 +794,20 @@ class TestRxTx_Offload(TestCase): "ICE_100G-E810C_QSFP", "ICE_25G-E810C_SFP", ]: - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect( "port config 0 tx_offload mbuf_fast_free off", "testpmd> " ) - self.dut.send_expect("port start 0", "testpmd> ") + self.sut_node.send_expect("port start 0", "testpmd> ") for capability in capabilities: - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect( + 
self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect( "port config 0 tx_offload %s on" % capability, "testpmd> " ) offload = [capability] self.check_port_config("tx", offload) - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect( "port config 0 tx_offload %s off" % capability, "testpmd> " ) self.check_port_config("tx", "NULL") @@ -841,16 +840,16 @@ class TestRxTx_Offload(TestCase): self.check_queue_config("tx", offload) # Disable mbuf_fast_free per_port. - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 tx_offload mbuf_fast_free off", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 tx_offload mbuf_fast_free off", "testpmd> ") self.check_port_config("tx", "NULL") # Enable mbuf_fast_free per_queue. - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port 0 txq 0 tx_offload mbuf_fast_free on", "testpmd> ") - self.dut.send_expect("port 0 txq 1 tx_offload mbuf_fast_free on", "testpmd> ") - self.dut.send_expect("port 0 txq 2 tx_offload mbuf_fast_free on", "testpmd> ") - self.dut.send_expect("port 0 txq 3 tx_offload mbuf_fast_free on", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port 0 txq 0 tx_offload mbuf_fast_free on", "testpmd> ") + self.sut_node.send_expect("port 0 txq 1 tx_offload mbuf_fast_free on", "testpmd> ") + self.sut_node.send_expect("port 0 txq 2 tx_offload mbuf_fast_free on", "testpmd> ") + self.sut_node.send_expect("port 0 txq 3 tx_offload mbuf_fast_free on", "testpmd> ") offload = [ "mbuf_fast_free", "mbuf_fast_free", @@ -860,17 +859,17 @@ class TestRxTx_Offload(TestCase): self.check_queue_config("tx", offload) # Disable mbuf_fast_free per_queue. 
- self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port 0 txq 0 tx_offload mbuf_fast_free off", "testpmd> ") - self.dut.send_expect("port 0 txq 1 tx_offload mbuf_fast_free off", "testpmd> ") - self.dut.send_expect("port 0 txq 2 tx_offload mbuf_fast_free off", "testpmd> ") - self.dut.send_expect("port 0 txq 3 tx_offload mbuf_fast_free off", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port 0 txq 0 tx_offload mbuf_fast_free off", "testpmd> ") + self.sut_node.send_expect("port 0 txq 1 tx_offload mbuf_fast_free off", "testpmd> ") + self.sut_node.send_expect("port 0 txq 2 tx_offload mbuf_fast_free off", "testpmd> ") + self.sut_node.send_expect("port 0 txq 3 tx_offload mbuf_fast_free off", "testpmd> ") offload = ["NULL", "NULL", "NULL", "NULL"] self.check_queue_config("tx", offload) # Enable mbuf_fast_free per_port. - self.dut.send_expect("port stop 0", "testpmd> ") - self.dut.send_expect("port config 0 tx_offload mbuf_fast_free on", "testpmd> ") + self.sut_node.send_expect("port stop 0", "testpmd> ") + self.sut_node.send_expect("port config 0 tx_offload mbuf_fast_free on", "testpmd> ") offload = ["mbuf_fast_free"] self.check_port_config("tx", offload) @@ -881,58 +880,58 @@ class TestRxTx_Offload(TestCase): offload = ["multi_segs"] # Start testpmd with "--tx-offloads=0x00008000" to enable multi_segs tx_offload self.pmdout.start_testpmd("%s" % self.cores, "--tx-offloads=0x00008000") - for portid in range(len(self.dut_ports)): + for portid in range(len(self.sut_ports)): self.check_port_config(rxtx="tx", offload=offload, port_id=portid) # Set fwd to txonly, Set the length of each segment of the TX-ONLY packets, Set the split policy for TX packets, then start to send pkgs - self.dut.send_expect("set fwd txonly", "testpmd> ") - self.dut.send_expect("set txpkts 64,128,512,2000,64,128,512,2000", "testpmd> ") - self.dut.send_expect("set txsplit rand", "testpmd> ") - self.dut.send_expect("start", 
"testpmd> ") + self.sut_node.send_expect("set fwd txonly", "testpmd> ") + self.sut_node.send_expect("set txpkts 64,128,512,2000,64,128,512,2000", "testpmd> ") + self.sut_node.send_expect("set txsplit rand", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # Check TX-packets will not hang and continue to increase self.verify_packets_increasing(rxtx="tx") - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ") # Start testpmd again without "--tx-offloads", check multi-segs is disabled by default self.pmdout.start_testpmd("%s" % self.cores, " ") - for portid in range(len(self.dut_ports)): - outstring = self.dut.send_expect( + for portid in range(len(self.sut_ports)): + outstring = self.sut_node.send_expect( "show port %d tx_offload configuration" % portid, "testpmd> " ) self.verify( "MULTI_SEGS" not in outstring, "multi-segs is not disabled by default" ) - self.dut.send_expect("port stop all", "testpmd> ") - for portid in range(len(self.dut_ports)): + self.sut_node.send_expect("port stop all", "testpmd> ") + for portid in range(len(self.sut_ports)): cmd = "port config {} tx_offload multi_segs on".format(portid) - self.dut.send_expect(cmd, "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ") - for portid in range(len(self.dut_ports)): + self.sut_node.send_expect(cmd, "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") + for portid in range(len(self.sut_ports)): self.check_port_config(rxtx="tx", offload=offload, port_id=portid) # Set fwd to txonly, Set the length of each segment of the TX-ONLY packets, Set the split policy for TX packets, then start to send pkgs - self.dut.send_expect("set fwd txonly", "testpmd> ") - self.dut.send_expect("set txpkts 64,128,256,512,64,128,256,512", "testpmd> ") - self.dut.send_expect("set txsplit rand", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + 
self.sut_node.send_expect("set fwd txonly", "testpmd> ") + self.sut_node.send_expect("set txpkts 64,128,256,512,64,128,256,512", "testpmd> ") + self.sut_node.send_expect("set txsplit rand", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") # Check TX-packets will not hang and continue to increase self.verify_packets_increasing(rxtx="tx") - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ") def tear_down(self): """ Run after each test case. """ - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def tear_down_all(self): """ Run after each test suite. """ time.sleep(2) - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_scatter.py b/tests/TestSuite_scatter.py index 534b3f25..8985e213 100644 --- a/tests/TestSuite_scatter.py +++ b/tests/TestSuite_scatter.py @@ -8,8 +8,8 @@ Test Scattered Packets. """ import time -from framework.packet import Packet, strip_pktload from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder, strip_pktload from framework.test_case import TestCase # @@ -30,14 +30,14 @@ class TestScatter(TestCase): Run at the start of each test suite. 
Scatter Prerequisites """ - dutPorts = self.dut.get_ports(self.nic) + sutPorts = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(dutPorts) >= 1, "Insufficient ports") - self.port = dutPorts[0] - tester_port = self.tester.get_local_port(self.port) - self.intf = self.tester.get_interface(tester_port) + self.verify(len(sutPorts) >= 1, "Insufficient ports") + self.port = sutPorts[0] + tg_port = self.tg_node.get_local_port(self.port) + self.intf = self.tg_node.get_interface(tg_port) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) if self.nic in [ "IXGBE_10G-X550EM_A_SFP", "IXGBE_10G-82599_SFP", @@ -62,19 +62,19 @@ class TestScatter(TestCase): else: self.mbsize = 1024 - self.tester.send_expect("ifconfig %s mtu 9000" % self.intf, "#") + self.tg_node.send_expect("ifconfig %s mtu 9000" % self.intf, "#") def scatter_pktgen_send_packet(self, pktsize): """ Functional test for scatter packets. """ - dmac = self.dut.get_mac_address(self.port) + dmac = self.sut_node.get_mac_address(self.port) - inst = self.tester.tcpdump_sniff_packets(self.intf) - pkt = Packet(pkt_type="IP_RAW", pkt_len=pktsize) - pkt.config_layer("ether", {"dst": dmac}) - pkt.send_pkt(self.tester, tx_port=self.intf) - sniff_pkts = self.tester.load_tcpdump_sniff_packets(inst) + inst = self.tg_node.tcpdump_sniff_packets(self.intf) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IP_RAW", pkt_len=pktsize) + scapy_pkt_builder.config_layer("ether", {"dst": dmac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.intf) + sniff_pkts = self.tg_node.load_tcpdump_sniff_packets(inst) res = "" if len(sniff_pkts): @@ -100,16 +100,16 @@ class TestScatter(TestCase): self.verify("Error" not in out, "launch error 1") - self.dut.send_expect("set fwd mac", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ") 
self.pmdout.wait_link_status_up(self.port) for offset in [-1, 0, 1, 4, 5]: ret = self.scatter_pktgen_send_packet(self.mbsize + offset) self.verify("58 58 58 58 58 58 58 58" in ret, "packet receive error") - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def tear_down(self): """ @@ -121,4 +121,4 @@ class TestScatter(TestCase): """ Run after each test suite. """ - self.tester.send_expect("ifconfig %s mtu 1500" % self.intf, "#") + self.tg_node.send_expect("ifconfig %s mtu 1500" % self.intf, "#") diff --git a/tests/TestSuite_short_live.py b/tests/TestSuite_short_live.py index a3a17edc..50deda90 100644 --- a/tests/TestSuite_short_live.py +++ b/tests/TestSuite_short_live.py @@ -34,12 +34,12 @@ class TestShortLiveApp(TestCase): """ Run at the start of each test suite. """ - self.ports = self.dut.get_ports(self.nic) + self.ports = self.sut_node.get_ports(self.nic) self.verify(len(self.ports) >= 2, "Insufficient number of ports.") - self.app_l2fwd_path = self.dut.apps_name["l2fwd"] - self.app_l3fwd_path = self.dut.apps_name["l3fwd"] - self.app_testpmd = self.dut.apps_name["test-pmd"] - self.eal_para = self.dut.create_eal_parameters + self.app_l2fwd_path = self.sut_node.apps_name["l2fwd"] + self.app_l3fwd_path = self.sut_node.apps_name["l3fwd"] + self.app_testpmd = self.sut_node.apps_name["test-pmd"] + self.eal_para = self.sut_node.create_eal_parameters def set_up(self): """ @@ -49,66 +49,66 @@ class TestShortLiveApp(TestCase): def compile_examples(self, example): # compile - out = self.dut.build_dpdk_apps("./examples/%s" % example) + out = self.sut_node.build_dpdk_apps("./examples/%s" % example) self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") def check_forwarding( - self, ports, nic, testerports=[None, None], pktSize=64, received=True + self, ports, nic, tgports=[None, None], 
pktSize=64, received=True ): self.send_packet( - ports[0], ports[1], self.nic, testerports[1], pktSize, received + ports[0], ports[1], self.nic, tgports[1], pktSize, received ) def send_packet( - self, txPort, rxPort, nic, testerports=None, pktSize=64, received=True + self, txPort, rxPort, nic, tgports=None, pktSize=64, received=True ): """ Send packages according to parameters. """ - if testerports is None: - rxitf = self.tester.get_interface(self.tester.get_local_port(rxPort)) - txitf = self.tester.get_interface(self.tester.get_local_port(txPort)) + if tgports is None: + rxitf = self.tg_node.get_interface(self.tg_node.get_local_port(rxPort)) + txitf = self.tg_node.get_interface(self.tg_node.get_local_port(txPort)) else: - itf = testerports - smac = self.tester.get_mac(self.tester.get_local_port(txPort)) - dmac = self.dut.get_mac_address(txPort) - Dut_tx_mac = self.dut.get_mac_address(rxPort) + itf = tgports + smac = self.tg_node.get_mac(self.tg_node.get_local_port(txPort)) + dmac = self.sut_node.get_mac_address(txPort) + Sut_tx_mac = self.sut_node.get_mac_address(rxPort) - self.tester.scapy_background() + self.tg_node.scapy_background() count = 1 # if only one port rx/tx, we should check count 2 so that both # rx and tx packet are list if txPort == rxPort: count = 2 - # ensure tester's link up + # ensure TG's link up self.verify( - self.tester.is_interface_up(intf=rxitf), "Wrong link status, should be up" + self.tg_node.is_interface_up(intf=rxitf), "Wrong link status, should be up" ) filter_list = [ {"layer": "ether", "config": {"type": "not IPv6"}}, {"layer": "userdefined", "config": {"pcap-filter": "not udp"}}, ] - inst = self.tester.tcpdump_sniff_packets( + inst = self.tg_node.tcpdump_sniff_packets( rxitf, count=count, filters=filter_list ) pktlen = pktSize - 14 padding = pktlen - 20 - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(src="%s", dst="%s")/IP()/Raw(load="P"*%s)], iface="%s", count=4)' % (smac, dmac, padding, txitf) ) - 
self.tester.scapy_execute() + self.tg_node.scapy_execute() - pkts = self.tester.load_tcpdump_sniff_packets(inst, timeout=2) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst, timeout=2) out = str(pkts[0].show) self.logger.info("SCAPY Result:\n" + out + "\n\n\n") if received: self.verify( - ("PPP" in out) and "src=%s" % Dut_tx_mac in out, "Receive test failed" + ("PPP" in out) and "src=%s" % Sut_tx_mac in out, "Receive test failed" ) else: self.verify("PPP" not in out, "Receive test failed") @@ -117,7 +117,7 @@ class TestShortLiveApp(TestCase): process_file = "/var/run/dpdk/rte/config" delay = 0 while delay < delay_max: - process = self.dut.send_expect("lsof %s | wc -l" % process_file, "# ") + process = self.sut_node.send_expect("lsof %s | wc -l" % process_file, "# ") # as FUSE filesystem may not be accessible for root, so the output might include some warning info res = process.splitlines()[-1].strip() if res != "0": @@ -137,17 +137,17 @@ class TestShortLiveApp(TestCase): Basic rx/tx forwarding test """ # dpdk start - self.dut.send_expect( + self.sut_node.send_expect( "./%s %s -- -i --portmask=0x3" % (self.app_testpmd, self.eal_para()), "testpmd>", 120, ) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") # check the ports are UP before sending packets - self.pmd_out = PmdOutput(self.dut) + self.pmd_out = PmdOutput(self.sut_node) res = self.pmd_out.wait_link_status_up("all", 30) self.verify(res is True, "there have port link is down") @@ -159,8 +159,8 @@ class TestShortLiveApp(TestCase): """ time = [] regex = re.compile(".* (\d+:\d{2}\.\d{2}).*") - eal_para = self.dut.create_eal_parameters(no_pci=True) - out = self.dut.send_expect( + eal_para = self.sut_node.create_eal_parameters(no_pci=True) + out 
= self.sut_node.send_expect( "echo quit | time ./%s %s -- -i" % (self.app_testpmd, eal_para), "# ", 120 ) time = regex.findall(out) @@ -175,17 +175,17 @@ class TestShortLiveApp(TestCase): for i in range(repeat_time): # dpdk start print("clean_up_with_signal_testpmd round %d" % (i + 1)) - self.dut.send_expect( + self.sut_node.send_expect( "./%s %s -- -i --portmask=0x3" % (self.app_testpmd, self.eal_para()), "testpmd>", 120, ) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") # check the ports are UP before sending packets - self.pmd_out = PmdOutput(self.dut) + self.pmd_out = PmdOutput(self.sut_node) res = self.pmd_out.wait_link_status_up("all", 30) self.verify(res is True, "there have port link is down") @@ -193,9 +193,9 @@ class TestShortLiveApp(TestCase): # kill with different Signal if i % 2 == 0: - self.dut.send_expect("pkill -2 testpmd", "# ", 60, True) + self.sut_node.send_expect("pkill -2 testpmd", "# ", 60, True) else: - self.dut.send_expect("pkill -15 testpmd", "# ", 60, True) + self.sut_node.send_expect("pkill -15 testpmd", "# ", 60, True) self.check_process() def test_clean_up_with_signal_l2fwd(self): @@ -204,7 +204,7 @@ class TestShortLiveApp(TestCase): for i in range(repeat_time): # dpdk start print("clean_up_with_signal_l2fwd round %d" % (i + 1)) - self.dut.send_expect( + self.sut_node.send_expect( "%s %s -- -p 0x3 &" % (self.app_l2fwd_path, self.eal_para()), "L2FWD: entering main loop", 60, @@ -213,9 +213,9 @@ class TestShortLiveApp(TestCase): # kill with different Signal if i % 2 == 0: - self.dut.send_expect("pkill -2 l2fwd", "Bye...", 60) + self.sut_node.send_expect("pkill -2 l2fwd", "Bye...", 60) else: - self.dut.send_expect("pkill -15 l2fwd", "Bye...", 60) + 
self.sut_node.send_expect("pkill -15 l2fwd", "Bye...", 60) self.check_process() def test_clean_up_with_signal_l3fwd(self): @@ -224,7 +224,7 @@ class TestShortLiveApp(TestCase): for i in range(repeat_time): # dpdk start print("clean_up_with_signal_l3fwd round %d" % (i + 1)) - self.dut.send_expect( + self.sut_node.send_expect( "%s %s -- -p 0x3 --config='(0,0,1),(1,0,2)' &" % (self.app_l3fwd_path, self.eal_para()), "L3FWD: entering main loop", @@ -234,9 +234,9 @@ class TestShortLiveApp(TestCase): # kill with different Signal if i % 2 == 0: - self.dut.send_expect("pkill -2 l3fwd", "Bye...", 60) + self.sut_node.send_expect("pkill -2 l3fwd", "Bye...", 60) else: - self.dut.send_expect("pkill -15 l3fwd", "Bye...", 60) + self.sut_node.send_expect("pkill -15 l3fwd", "Bye...", 60) self.check_process() def tear_down(self): @@ -244,13 +244,13 @@ class TestShortLiveApp(TestCase): Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() - self.dut.send_expect("rm -rf ./app/test-pmd/testpmd", "# ") - self.dut.send_expect("rm -rf ./app/test-pmd/*.o", "# ") - self.dut.send_expect("rm -rf ./app/test-pmd/build", "# ") + self.sut_node.kill_all() + self.sut_node.send_expect("rm -rf ./app/test-pmd/testpmd", "# ") + self.sut_node.send_expect("rm -rf ./app/test-pmd/*.o", "# ") + self.sut_node.send_expect("rm -rf ./app/test-pmd/build", "# ") diff --git a/tests/TestSuite_shutdown_api.py b/tests/TestSuite_shutdown_api.py index a0fdcc1d..364f1139 100644 --- a/tests/TestSuite_shutdown_api.py +++ b/tests/TestSuite_shutdown_api.py @@ -44,19 +44,19 @@ class TestShutdownApi(TestCase): """ Run at the start of each test suite. 
""" - ports = self.dut.get_ports() + ports = self.sut_node.get_ports() self.verify(len(ports) >= 1, "Insufficient number of ports.") self.ports = ports[:1] - self.ports_socket = self.dut.get_numa_id(self.ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.ports[0]) for port in self.ports: - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu %s" - % (self.tester.get_interface(self.tester.get_local_port(port)), 5000), + % (self.tg_node.get_interface(self.tg_node.get_local_port(port)), 5000), "# ", ) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.vm_env_done = False def set_up(self): @@ -64,11 +64,11 @@ class TestShutdownApi(TestCase): Run before each test case. """ if self._suite_result.test_case == "test_change_linkspeed_vf": - self.used_driver = self.dut.ports_info[0]["port"].get_nic_driver() + self.used_driver = self.sut_node.ports_info[0]["port"].get_nic_driver() driver_name = DRIVERS.get(self.nic) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.bind_nic_driver(self.dut_ports, driver=driver_name) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.bind_nic_driver(self.sut_ports, driver=driver_name) self.setup_vm_env(driver_name) def get_stats(self, portid): @@ -76,7 +76,7 @@ class TestShutdownApi(TestCase): Get packets number from port statistic. 
@param: stop -- stop forward before get stats """ - output = PmdOutput(self.dut) + output = PmdOutput(self.sut_node) stats = output.get_pmd_stats(portid) return stats @@ -93,12 +93,12 @@ class TestShutdownApi(TestCase): if ports is None: ports = self.ports if len(ports) == 1: - # check tester's link status before send pkg - tester_intf = self.tester.get_interface( - self.tester.get_local_port(ports[0]) + # check TG's link status before send pkg + tg_intf = self.tg_node.get_interface( + self.tg_node.get_local_port(ports[0]) ) self.verify( - self.tester.is_interface_up(intf=tester_intf), + self.tg_node.is_interface_up(intf=tg_intf), "Wrong link status, should be up", ) self.send_packet( @@ -141,9 +141,9 @@ class TestShutdownApi(TestCase): ] time.sleep(5) - itf = self.tester.get_interface(self.tester.get_local_port(rxPort)) - smac = self.tester.get_mac(self.tester.get_local_port(rxPort)) - dmac = self.dut.get_mac_address(rxPort) + itf = self.tg_node.get_interface(self.tg_node.get_local_port(rxPort)) + smac = self.tg_node.get_mac(self.tg_node.get_local_port(rxPort)) + dmac = self.sut_node.get_mac_address(rxPort) # when promisc is true, destination mac should be fake if promisc: @@ -165,9 +165,9 @@ class TestShutdownApi(TestCase): padding, ) - self.tester.scapy_foreground() - self.tester.scapy_append('sendp(%s, iface="%s", count=4)' % (pkg, itf)) - self.tester.scapy_execute() + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('sendp(%s, iface="%s", count=4)' % (pkg, itf)) + self.tg_node.scapy_execute() time.sleep(3) port0_stats = self.get_stats(txPort) @@ -220,18 +220,18 @@ class TestShutdownApi(TestCase): def check_ports(self, status=True): """ - Check link status of the ports from tester side. + Check link status of the ports from TG side. 
""" for port in self.ports: - tester_intf = self.tester.get_interface(self.tester.get_local_port(port)) + tg_intf = self.tg_node.get_interface(self.tg_node.get_local_port(port)) if status: self.verify( - self.tester.is_interface_up(intf=tester_intf), + self.tg_node.is_interface_up(intf=tg_intf), "Wrong link status, should be up", ) else: self.verify( - self.tester.is_interface_down(intf=tester_intf), + self.tg_node.is_interface_down(intf=tg_intf), "Wrong link status, should be down", ) @@ -273,35 +273,35 @@ class TestShutdownApi(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") - self.used_dut_port = self.dut_ports[0] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.used_sut_port = self.sut_ports[0] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) time.sleep(1) vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} - self.host_intf = self.dut.ports_info[self.used_dut_port]["intf"] + self.host_intf = self.sut_node.ports_info[self.used_sut_port]["intf"] self.vf_mac = "00:01:23:45:67:89" - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.host_intf, self.vf_mac), "# " ) # set up VM0 ENV - self.vm0 = QEMUKvm(self.dut, "vm0", "shutdown_api") + self.vm0 = QEMUKvm(self.sut_node, "vm0", "shutdown_api") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) try: - 
self.vm0_dut = self.vm0.start() - if self.vm0_dut is None: + self.vm0_sut = self.vm0.start() + if self.vm0_sut is None: raise Exception("Set up VM0 ENV failed!") except Exception as e: self.destroy_vm_env() raise Exception(e) - self.vm0_dut_ports = self.vm0_dut.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm0_dut) + self.vm0_sut_ports = self.vm0_sut.get_ports("any") + self.vm0_testpmd = PmdOutput(self.vm0_sut) self.vm_env_done = True @@ -310,22 +310,22 @@ class TestShutdownApi(TestCase): self.vm0_testpmd.quit() if getattr(self, "vm0", None): - if getattr(self, "vm0_dut", None): - self.vm0_dut.kill_all() + if getattr(self, "vm0_sut", None): + self.vm0_sut.kill_all() self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() - if getattr(self, "used_dut_port", None) is not None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - self.used_dut_port = None + if getattr(self, "used_sut_port", None) is not None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + self.used_sut_port = None for port in self.ports: - self.dut.send_expect( - "ethtool -s %s autoneg on " % self.dut.ports_info[port]["intf"], "#" + self.sut_node.send_expect( + "ethtool -s %s autoneg on " % self.sut_node.ports_info[port]["intf"], "#" ) - self.bind_nic_driver(self.dut_ports, driver=self.used_driver or self.drivername) + self.bind_nic_driver(self.sut_ports, driver=self.used_driver or self.drivername) if not self.vm_env_done: return @@ -340,20 +340,20 @@ class TestShutdownApi(TestCase): "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() 
- self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") self.check_forwarding(received=False) - self.dut.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port stop all", "testpmd> ", 100) if self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"]: self.check_ports(status=True) else: self.check_ports(status=False) - self.dut.send_expect("port start all", "testpmd> ", 100) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() def test_set_promiscuousmode(self): @@ -369,12 +369,12 @@ class TestShutdownApi(TestCase): socket=self.ports_socket, ) - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ", 100) - self.dut.send_expect("show config rxtx", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("show config rxtx", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") try: self.check_forwarding(ports) @@ -385,17 +385,17 @@ class TestShutdownApi(TestCase): self.verify(False, e.message) self.check_forwarding(ports, received=False, promisc=True) - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("set promisc all on", "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ", 100) - self.dut.send_expect("show config rxtx", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("set promisc all on", 
"testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("show config rxtx", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding(ports, promisc=True) self.check_forwarding(ports) - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) def test_set_allmulticast(self): """ @@ -410,49 +410,49 @@ class TestShutdownApi(TestCase): socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect("set allmulti all off", "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("set allmulti all off", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding(ports) self.check_forwarding(ports, received=False, promisc=True) self.check_forwarding(ports, received=False, allmulti=True) - self.dut.send_expect("set allmulti all on", "testpmd> ") + self.sut_node.send_expect("set allmulti all on", "testpmd> ") self.check_forwarding(ports) self.check_forwarding(ports, allmulti=True) self.check_forwarding(ports, received=False, promisc=True) - self.dut.send_expect("set promisc all on", "testpmd> ") + self.sut_node.send_expect("set promisc all on", "testpmd> ") self.check_forwarding(ports) self.check_forwarding(ports, promisc=True) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def test_reset_queues(self): """ Reset RX/TX Queues. 
""" - testcorelist = self.dut.get_core_list("1S/8C/1T", socket=self.ports_socket) + testcorelist = self.sut_node.get_core_list("1S/8C/1T", socket=self.ports_socket) self.pmdout.start_testpmd( testcorelist, "--portmask=%s --port-topology=loop" % utils.create_mask([self.ports[0]]), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") fwdcoremask = utils.create_mask(testcorelist[-3:]) - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("port config all rxq 2", "testpmd> ") - self.dut.send_expect("port config all txq 2", "testpmd> ") - self.dut.send_expect("set coremask %s" % fwdcoremask, "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("port start all", "testpmd> ", 100) - out = self.dut.send_expect("show config rxtx", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port config all rxq 2", "testpmd> ") + self.sut_node.send_expect("port config all txq 2", "testpmd> ") + self.sut_node.send_expect("set coremask %s" % fwdcoremask, "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + out = self.sut_node.send_expect("show config rxtx", "testpmd> ") self.verify("RX queue number: 2" in out, "RX queues not reconfigured properly") self.verify("Tx queue number: 2" in out, "TX queues not reconfigured properly") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("quit", "# ", 30) def test_reconfigure_ports(self): """ @@ -473,34 +473,34 @@ class TestShutdownApi(TestCase): % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") - out = self.dut.send_expect("show config rxtx", "testpmd> ") + 
self.sut_node.send_expect("set promisc all off", "testpmd>") + out = self.sut_node.send_expect("show config rxtx", "testpmd> ") Rx_offloads = re.compile("Rx offloads=(.*?)\s+?").findall(out, re.S) crc_keep_temp = [] - for i in range(len(self.dut.get_ports())): + for i in range(len(self.sut_node.get_ports())): crc_keep_temp.append(int(Rx_offloads[i], 16) & RX_OFFLOAD_KEEP_CRC) crc_keep = crc_keep_temp[0] crc_keep = crc_keep and crc_keep_temp[i] self.verify(crc_keep == RX_OFFLOAD_KEEP_CRC, "CRC keeping not enabled properly") - self.dut.send_expect("port stop all", "testpmd> ", 100) - for i in range(len(self.dut.get_ports())): - self.dut.send_expect( + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + for i in range(len(self.sut_node.get_ports())): + self.sut_node.send_expect( "port config %s rx_offload keep_crc off" % i, "testpmd> " ) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("port start all", "testpmd> ", 100) - out = self.dut.send_expect("show config rxtx", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + out = self.sut_node.send_expect("show config rxtx", "testpmd> ") Rx_offloads = re.compile("Rx offloads=(.*?)\s+?").findall(out, re.S) crc_strip_temp = [] - for i in range(len(self.dut.get_ports())): + for i in range(len(self.sut_node.get_ports())): crc_strip_temp.append(int(Rx_offloads[i], 16) | ~RX_OFFLOAD_KEEP_CRC) crc_strip = crc_strip_temp[0] crc_strip = crc_strip and crc_strip_temp[i] self.verify( crc_strip == ~RX_OFFLOAD_KEEP_CRC, "CRC stripping not enabled properly" ) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() def test_change_linkspeed(self): @@ -512,11 +512,11 @@ class TestShutdownApi(TestCase): "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") + 
self.sut_node.send_expect("set promisc all off", "testpmd>") - out = self.tester.send_expect( + out = self.tg_node.send_expect( "ethtool %s" - % self.tester.get_interface(self.tester.get_local_port(self.ports[0])), + % self.tg_node.get_interface(self.tg_node.get_local_port(self.ports[0])), "# ", ) @@ -540,78 +540,78 @@ class TestShutdownApi(TestCase): elif self.nic in ["IXGBE_10G-X550EM_X_10G_T"]: if config[0] not in ["1000", "10000"]: continue - self.dut.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port stop all", "testpmd> ", 100) for port in self.ports: - self.dut.send_expect( + self.sut_node.send_expect( "port config %d speed %s duplex %s" % (port, config[0], config[1].lower()), "testpmd> ", ) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd> ", 100) # wait NIC link status stable within 15s self.pmdout.wait_link_status_up("all") for port in self.ports: - out = self.dut.send_expect("show port info %s" % port, "testpmd>") + out = self.sut_node.send_expect("show port info %s" % port, "testpmd>") self.verify( - "Link status: up" in out, "Wrong link status reported by the dut" + "Link status: up" in out, "Wrong link status reported by the SUT" ) if int(config[0]) < 1000: self.verify( "Link speed: %s Mbps" % config[0] in out, - "Wrong speed reported by the dut", + "Wrong speed reported by the SUT", ) else: num = int(int(config[0]) / 1000) self.verify( "Link speed: %d Gbps" % num in out, - "Wrong speed reported by the dut", + "Wrong speed reported by the SUT", ) self.verify( "Link duplex: %s-duplex" % config[1].lower() in out, - "Wrong link type reported by the dut", + "Wrong link type reported by the SUT", ) - out = self.tester.send_expect( + out = self.tg_node.send_expect( "ethtool %s" - % self.tester.get_interface(self.tester.get_local_port(port)), + % 
self.tg_node.get_interface(self.tg_node.get_local_port(port)), "# ", ) self.verify( "Speed: %sMb/s" % config[0] in out, - "Wrong speed reported by the self.tester.", + "Wrong speed reported by the self.tg_node.", ) self.verify( "Duplex: %s" % config[1] in out, - "Wrong link type reported by the self.tester.", + "Wrong link type reported by the self.tg_node.", ) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") def test_change_linkspeed_vf(self): """ Change Link Speed VF . """ self.check_vf_link_status() - out = self.tester.send_expect( + out = self.tg_node.send_expect( "ethtool %s" - % self.tester.get_interface(self.tester.get_local_port(self.ports[0])), + % self.tg_node.get_interface(self.tg_node.get_local_port(self.ports[0])), "# ", 100, ) - dut_out = self.dut.send_expect( - "ethtool %s" % self.dut.ports_info[0]["intf"], "# ", 100 + sut_out = self.sut_node.send_expect( + "ethtool %s" % self.sut_node.ports_info[0]["intf"], "# ", 100 ) check_auto_negotiation = "Supports auto-negotiation: Yes" self.verify( - check_auto_negotiation in out and check_auto_negotiation in dut_out, - "tester or dut Auto-negotiation not support.", + check_auto_negotiation in out and check_auto_negotiation in sut_out, + "TG or SUT Auto-negotiation not support.", ) result_scanner = r"([0-9]+)base\S*/([A-Za-z]+)" scanner = re.compile(result_scanner, re.DOTALL) - m = scanner.findall(dut_out) + m = scanner.findall(sut_out) configs = m[: -int(len(m) / 2)] if not self.check_linkspeed_config(configs): @@ -628,26 +628,26 @@ class TestShutdownApi(TestCase): print(config) for port in self.ports: if len(configs) != 1: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool -s %s autoneg off speed %s duplex %s" % ( - self.dut.ports_info[port]["intf"], + self.sut_node.ports_info[port]["intf"], config[0], config[1].lower(), ), "#", ) 
time.sleep(5) - self.tester_itf_1 = self.tester.get_interface(port) + self.tg_itf_1 = self.tg_node.get_interface(port) pkt = ( 'Ether(dst="%s", src="02:00:00:00:00:01")/IP()/UDP()/("X"*22)' % self.vf_mac ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([%s], iface="%s", count=%d)' - % (pkt, self.tester_intf, tx_rx_packets) + % (pkt, self.tg_intf, tx_rx_packets) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(2) out = self.vm0_testpmd.execute_cmd("show port stats all") self.verify( @@ -669,11 +669,11 @@ class TestShutdownApi(TestCase): actual_speed = actual_speed * 1000 self.verify( config[0] == str(actual_speed), - "Wrong VF speed reported by the self.tester.", + "Wrong VF speed reported by the self.tg_node.", ) self.verify( config[1].lower() == linktype[0].lower(), - "Wrong VF link type reported by the self.tester.", + "Wrong VF link type reported by the self.tg_node.", ) def test_enable_disablejumbo(self): @@ -686,17 +686,17 @@ class TestShutdownApi(TestCase): "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("port config all max-pkt-len %d" % jumbo_size, "testpmd> ") - out = self.dut.send_expect("vlan set strip off all", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port config all max-pkt-len %d" % jumbo_size, "testpmd> ") + out = self.sut_node.send_expect("vlan set strip off all", "testpmd> ") if "fail" not in out: for port in self.ports: - self.dut.send_expect("vlan set filter on %d" % port, "testpmd> ") - self.dut.send_expect("rx_vlan add 1 %d" % port, "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("port start all", "testpmd> ", 100) - self.dut.send_expect("start", "testpmd> 
") + self.sut_node.send_expect("vlan set filter on %d" % port, "testpmd> ") + self.sut_node.send_expect("rx_vlan add 1 %d" % port, "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("start", "testpmd> ") if self.nic in [ "IXGBE_10G-X550EM_A_SFP", @@ -712,7 +712,7 @@ class TestShutdownApi(TestCase): vlan_jumbo_size = jumbo_size + 4 else: vlan_jumbo_size = jumbo_size - out = self.dut.send_expect( + out = self.sut_node.send_expect( "show port %d rx_offload configuration" % port, "testpmd> " ) if "VLAN_STRIP" in out: @@ -732,12 +732,12 @@ class TestShutdownApi(TestCase): vlan_strip=vlan_strip, ) - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("port config all hw-vlan off", "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ", 100) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port config all hw-vlan off", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("start", "testpmd> ") """ On 1G NICs, when the jubmo frame MTU set as X, the software adjust it to (X + 4). 
""" @@ -756,13 +756,13 @@ class TestShutdownApi(TestCase): "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("port config rss ip", "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("port start all", "testpmd> ", 100) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port config rss ip", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() def test_change_numberrxdtxd(self): @@ -774,17 +774,17 @@ class TestShutdownApi(TestCase): "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") - - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("port config all rxd 1024", "testpmd> ") - self.dut.send_expect("port config all txd 1024", "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("port start all", "testpmd> ", 100) - out = self.dut.send_expect("show config rxtx", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd>") + + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port config all rxd 1024", "testpmd> ") + self.sut_node.send_expect("port config all txd 1024", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + out = self.sut_node.send_expect("show config rxtx", "testpmd> ") self.verify("RX desc=1024" in out, "RX descriptor not reconfigured 
properly") self.verify("TX desc=1024" in out, "TX descriptor not reconfigured properly") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() def test_change_numberrxdtxdaftercycle(self): @@ -796,26 +796,26 @@ class TestShutdownApi(TestCase): "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") - - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("port config all rxd 1024", "testpmd> ") - self.dut.send_expect("port config all txd 1024", "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("port start all", "testpmd> ", 100) - out = self.dut.send_expect("show config rxtx", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd>") + + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port config all rxd 1024", "testpmd> ") + self.sut_node.send_expect("port config all txd 1024", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + out = self.sut_node.send_expect("show config rxtx", "testpmd> ") self.verify("RX desc=1024" in out, "RX descriptor not reconfigured properly") self.verify("TX desc=1024" in out, "TX descriptor not reconfigured properly") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("port start all", "testpmd> ", 100) - out = self.dut.send_expect("show config rxtx", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port start all", "testpmd> ", 100) + out = self.sut_node.send_expect("show config 
rxtx", "testpmd> ") self.verify("RX desc=1024" in out, "RX descriptor not reconfigured properly") self.verify("TX desc=1024" in out, "TX descriptor not reconfigured properly") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() def test_change_thresholds(self): @@ -830,27 +830,27 @@ class TestShutdownApi(TestCase): "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("port stop all", "testpmd> ", 100) if self.nic in [ "IXGBE_10G-X550EM_X_10G_T", "IXGBE_10G-X550T", "IXGBE_10G-X540T", "IXGBE_10G-82599_SFP", ]: - self.dut.send_expect("port config all txfreet 32", "testpmd> ") - self.dut.send_expect("port config all txrst 32", "testpmd> ") - self.dut.send_expect("port config all rxfreet 32", "testpmd> ") - self.dut.send_expect("port config all txpt 64", "testpmd> ") - self.dut.send_expect("port config all txht 64", "testpmd> ") + self.sut_node.send_expect("port config all txfreet 32", "testpmd> ") + self.sut_node.send_expect("port config all txrst 32", "testpmd> ") + self.sut_node.send_expect("port config all rxfreet 32", "testpmd> ") + self.sut_node.send_expect("port config all txpt 64", "testpmd> ") + self.sut_node.send_expect("port config all txht 64", "testpmd> ") if self.nic in ["IGC-I225_LM"]: - self.dut.send_expect("port config all txwt 16", "testpmd> ") + self.sut_node.send_expect("port config all txwt 16", "testpmd> ") else: - self.dut.send_expect("port config all txwt 0", "testpmd> ") + self.sut_node.send_expect("port config all txwt 0", "testpmd> ") - self.dut.send_expect("port start all", "testpmd> ", 100) - out = self.dut.send_expect("show config rxtx", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + out = 
self.sut_node.send_expect("show config rxtx", "testpmd> ") self.verify( "RX free threshold=32" in out, "RX descriptor not reconfigured properly" ) @@ -873,8 +873,8 @@ class TestShutdownApi(TestCase): self.verify("wthresh=16" in out, "TX descriptor not reconfigured properly") else: self.verify("wthresh=0" in out, "TX descriptor not reconfigured properly") - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() def test_stress_test(self): @@ -888,31 +888,31 @@ class TestShutdownApi(TestCase): "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") tgenInput = [] for port in self.ports: - dmac = self.dut.get_mac_address(port) - self.tester.scapy_append( + dmac = self.sut_node.get_mac_address(port) + self.tg_node.scapy_append( 'wrpcap("test%d.pcap",[Ether(src="02:00:00:00:00:0%d",dst="%s")/IP()/UDP()/()])' % (port, port, dmac) ) tgenInput.append( ( - self.tester.get_local_port(port), - self.tester.get_local_port(port), + self.tg_node.get_local_port(port), + self.tg_node.get_local_port(port), "test%d.pcap" % port, ) ) for _ in range(stress_iterations): - self.dut.send_expect("port stop all", "testpmd> ", 100) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("port start all", "testpmd> ", 100) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ", 100) + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("start", "testpmd> ") self.check_forwarding() - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ") + 
self.sut_node.send_expect("quit", "# ") def test_link_stats(self): """ @@ -923,20 +923,20 @@ class TestShutdownApi(TestCase): "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), socket=self.ports_socket, ) - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") ports_num = len(self.ports) # link down test for i in range(ports_num): - self.dut.send_expect("set link-down port %d" % i, "testpmd>") + self.sut_node.send_expect("set link-down port %d" % i, "testpmd>") # check NIC link status update within 15s self.check_ports(status=False) # link up test for j in range(ports_num): - self.dut.send_expect("set link-up port %d" % j, "testpmd>") + self.sut_node.send_expect("set link-up port %d" % j, "testpmd>") self.check_ports(status=True) self.check_forwarding() @@ -961,7 +961,7 @@ class TestShutdownApi(TestCase): for i in range(3): rxqid = randint(0, queue_num - 1) self.desc = randint(0, 4095) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "show port %s rxq %s desc %s status" % (self.ports[0], rxqid, self.desc), "testpmd> ", @@ -978,7 +978,7 @@ class TestShutdownApi(TestCase): ) txqid = randint(0, queue_num - 1) self.desc = randint(0, 511) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "show port %s txq %s desc %s status" % (self.ports[0], txqid, self.desc), "testpmd> ", @@ -1000,7 +1000,7 @@ class TestShutdownApi(TestCase): """ if self._suite_result.test_case == "test_change_linkspeed_vf": self.destroy_vm_env() - self.dut.kill_all() + self.sut_node.kill_all() self.pmdout.start_testpmd( "Default", "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), @@ -1010,13 +1010,13 @@ class TestShutdownApi(TestCase): # link up test, to avoid failing 
further tests if link was down for i in range(ports_num): ## sometimes output text messingup testpmd prompt so trimmed prompt - self.dut.send_expect("set link-up port %d" % i, ">") + self.sut_node.send_expect("set link-up port %d" % i, ">") # start ports, to avodi failing further tests if ports are stoped - self.dut.send_expect("port start all", "testpmd> ", 100) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("quit", "# ") def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_skeleton.py b/tests/TestSuite_skeleton.py index 6e29011b..a7875b2d 100644 --- a/tests/TestSuite_skeleton.py +++ b/tests/TestSuite_skeleton.py @@ -19,16 +19,16 @@ class TestSkeleton(TestCase): Run at the start of each test suite. timer prerequistites """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") - cores = self.dut.get_core_list("1S/2C/1T") + cores = self.sut_node.get_core_list("1S/2C/1T") self.coremask = utils.create_mask(cores) - self.mac = self.dut.get_mac_address(self.dut_ports[0]) - self.app_skeleton_path = self.dut.apps_name["skeleton"] + self.mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.app_skeleton_path = self.sut_node.apps_name["skeleton"] self.path = "./%s/build/basicfwd" % self.app_skeleton_path - out = self.dut.build_dpdk_apps("./examples/skeleton") + out = self.sut_node.build_dpdk_apps("./examples/skeleton") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -39,20 +39,20 @@ class TestSkeleton(TestCase): pass def test_skeleton(self): - eal_para = self.dut.create_eal_parameters(cores="1S/2C/1T") + eal_para = 
self.sut_node.create_eal_parameters(cores="1S/2C/1T") cmd = self.path + " %s " % eal_para - self.dut.send_expect(cmd, "forwarding packets", 60) + self.sut_node.send_expect(cmd, "forwarding packets", 60) time.sleep(5) - self.iface_port0 = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.iface_port0 = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - self.iface_port1 = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[1]) + self.iface_port1 = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[1]) ) - self.inst_port1 = self.tester.tcpdump_sniff_packets(self.iface_port1) + self.inst_port1 = self.tg_node.tcpdump_sniff_packets(self.iface_port1) self.scapy_send_packet(self.iface_port0) out_port1 = self.get_tcpdump_package(self.inst_port1) @@ -64,14 +64,14 @@ class TestSkeleton(TestCase): """ Send a packet to port """ - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(dst="%s")/IP()/UDP()/Raw(\'X\'*18)], iface="%s", count=4)' % (self.mac, iface) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def get_tcpdump_package(self, inst): - pkts = self.tester.load_tcpdump_sniff_packets(inst) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst) dsts = [] for i in range(len(pkts)): dst = pkts.strip_element_layer2("dst", p_index=i) @@ -82,7 +82,7 @@ class TestSkeleton(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) def tear_down_all(self): diff --git a/tests/TestSuite_softnic.py b/tests/TestSuite_softnic.py index e379125d..658982d4 100644 --- a/tests/TestSuite_softnic.py +++ b/tests/TestSuite_softnic.py @@ -13,31 +13,31 @@ import string import time import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestSoftnic(TestCase): def set_up_all(self): # Based on h/w type, choose how many ports to use - ports = self.dut.get_ports() - self.dut_ports = self.dut.get_ports(self.nic) + ports = self.sut_node.get_ports() + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available self.verify(len(ports) >= 1, "Insufficient ports for testing") - self.def_driver = self.dut.ports_info[ports[0]]["port"].get_nic_driver() - self.ports_socket = self.dut.get_numa_id(ports[0]) + self.def_driver = self.sut_node.ports_info[ports[0]]["port"].get_nic_driver() + self.ports_socket = self.sut_node.get_numa_id(ports[0]) # Verify that enough threads are available - cores = self.dut.get_core_list("1S/1C/1T") + cores = self.sut_node.get_core_list("1S/1C/1T") self.verify(cores is not None, "Insufficient cores for speed testing") global P0 P0 = ports[0] - self.txItf = self.tester.get_interface(self.tester.get_local_port(P0)) - self.dmac = self.dut.get_mac_address(P0) + self.txItf = self.tg_node.get_interface(self.tg_node.get_local_port(P0)) + self.dmac = self.sut_node.get_mac_address(P0) self.headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["udp"] # need change config files @@ -45,12 +45,12 @@ class TestSoftnic(TestCase): self.firmware = r"dep/firmware.cli" self.tm_firmware = r"dep/tm_firmware.cli" self.nat_firmware = r"dep/nat_firmware.cli" - 
self.dut.session.copy_file_to(self.firmware, self.root_path) - self.dut.session.copy_file_to(self.tm_firmware, self.root_path) - self.dut.session.copy_file_to(self.nat_firmware, self.root_path) - self.eal_param = " -a %s" % self.dut.ports_info[0]["pci"] - self.path = self.dut.apps_name["test-pmd"] - self.pmdout = PmdOutput(self.dut) + self.sut_node.session.copy_file_to(self.firmware, self.root_path) + self.sut_node.session.copy_file_to(self.tm_firmware, self.root_path) + self.sut_node.session.copy_file_to(self.nat_firmware, self.root_path) + self.eal_param = " -a %s" % self.sut_node.ports_info[0]["pci"] + self.path = self.sut_node.apps_name["test-pmd"] + self.pmdout = PmdOutput(self.sut_node) # get dts output path if self.logger.log_path.startswith(os.sep): self.output_path = self.logger.log_path @@ -58,8 +58,8 @@ class TestSoftnic(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.dut.bind_interfaces_linux(self.drivername, [ports[0]]) + self.pktgen_helper = TrafficGeneratorStream() + self.sut_node.bind_interfaces_linux(self.drivername, [ports[0]]) def set_up(self): """ @@ -67,12 +67,12 @@ class TestSoftnic(TestCase): """ def change_config_file(self, file_name): - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e '4c link LINK0 dev %s' %s" - % (self.dut.ports_info[0]["pci"], self.root_path + file_name), + % (self.sut_node.ports_info[0]["pci"], self.root_path + file_name), "#", ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/thread [0-9]/thread 2/g' %s" % self.root_path + file_name, "#" ) @@ -88,9 +88,9 @@ class TestSoftnic(TestCase): eal_param="-s 0x4 %s --vdev 'net_softnic0,firmware=/tmp/%s,cpu_id=1,conn_port=8086'" % (self.eal_param, "firmware.cli"), ) - self.dut.send_expect("start", "testpmd>") - rx_port = 
self.tester.get_local_port(0) - tx_port = self.tester.get_local_port(0) + self.sut_node.send_expect("start", "testpmd>") + rx_port = self.tg_node.get_local_port(0) + tx_port = self.tg_node.get_local_port(0) n = 0 for frame in self.frame_size: payload_size = frame - self.headers_size @@ -100,16 +100,16 @@ class TestSoftnic(TestCase): self.dmac, payload_size, ) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap, pkt)) + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap, pkt)) tgen_input.append((tx_port, rx_port, pcap)) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) pps = pps / 1000000.0 self.verify(pps > 0, "No traffic detected") self.verify(pps > expect_pps[n], "No traffic detected") @@ -123,8 +123,8 @@ class TestSoftnic(TestCase): eal_param="-s 0x4 %s --vdev 'net_softnic0,firmware=/tmp/%s,cpu_id=1,conn_port=8086'" % (self.eal_param, "tm_firmware.cli"), ) - self.dut.send_expect("start", "testpmd>") - rx_port = self.tester.get_local_port(0) + self.sut_node.send_expect("start", "testpmd>") + rx_port = self.tg_node.get_local_port(0) pkts = [ "Ether(dst='%s')/IP(dst='100.0.0.0')/UDP()/Raw(load='x'*(64 - %s))", "Ether(dst='%s')/IP(dst='100.0.15.255')/UDP()/Raw(load='x'*(64 - %s))", @@ -136,30 +136,30 @@ class TestSoftnic(TestCase): tgen_input = [] pcap = os.sep.join([self.output_path, "test.pcap"]) pkt = pkts[i] % (self.dmac, self.headers_size) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap, pkt)) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap, pkt)) + 
self.tg_node.scapy_execute() if i == 2: for j in range(16): pk = ( "Ether(dst='%s')/IP(dst='100.0.15.%d')/UDP()/Raw(load='x'*(64 - %s))" % (self.dmac, j, self.headers_size) ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s/test_%d.pcap", [%s])' % (self.output_path, j, pk) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() tgen_input.append( (rx_port, rx_port, "%s/test_%d.pcap" % (self.output_path, j)) ) else: tgen_input.append((rx_port, rx_port, pcap)) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - bps, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + bps, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) if i == 2: self.verify( except_bps_range[1] * 16 > bps > except_bps_range[0] * 16, @@ -179,7 +179,7 @@ class TestSoftnic(TestCase): pkt_type = ["tcp", "udp"] for t in pkt_type: for i in range(2): - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e '12c table action profile AP0 ipv4 offset 270 fwd nat %s proto %s' %s" % (pkt_location[i], t, self.root_path + "nat_firmware.cli"), "#", @@ -195,40 +195,40 @@ class TestSoftnic(TestCase): "ICE_25G-E810C_SFP", "ICE_25G-E810_XXV_SFP", ]: - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") # src ip tcp for j in range(2): out = self.scapy_send_packet(pkt_location[i], ips[j], t) self.verify(expect_ips[j] in out, "fail to receive expect packet") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(1) def scapy_send_packet(self, pkt_location, ip, pkt_type): - self.tester.scapy_foreground() + 
self.tg_node.scapy_foreground() pkt = "Ether(dst='%s')/IP(dst='%s')/" % (self.dmac, ip) if pkt_type == "tcp": pkt = pkt + "TCP()/Raw(load='x'*20)" else: pkt = pkt + "UDP()/Raw(load='x'*20)" - self.tester.scapy_append('sendp([%s], iface="%s")' % (pkt, self.txItf)) + self.tg_node.scapy_append('sendp([%s], iface="%s")' % (pkt, self.txItf)) self.start_tcpdump(self.txItf) - self.tester.scapy_execute() + self.tg_node.scapy_execute() out = self.get_tcpdump_package() return out def get_tcpdump_package(self): time.sleep(4) - self.tester.send_expect("killall tcpdump", "#") - out = self.tester.send_expect( + self.tg_node.send_expect("killall tcpdump", "#") + out = self.tg_node.send_expect( "tcpdump -A -nn -e -vv -r getPackageByTcpdump.cap |grep '192.168'", "#" ) return out def start_tcpdump(self, rxItf): - self.tester.send_expect("rm -rf getPackageByTcpdump.cap", "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf getPackageByTcpdump.cap", "#") + self.tg_node.send_expect( "tcpdump -A -nn -e -vv -w getPackageByTcpdump.cap -i %s 2> /dev/null& " % self.txItf, "#", @@ -239,12 +239,12 @@ class TestSoftnic(TestCase): """ Run after each test case. """ - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def tear_down_all(self): """ Run after each test suite. """ - self.dut.bind_interfaces_linux( - driver=self.def_driver, nics_to_bind=self.dut.get_ports() + self.sut_node.bind_interfaces_linux( + driver=self.def_driver, nics_to_bind=self.sut_node.get_ports() ) diff --git a/tests/TestSuite_speed_capabilities.py b/tests/TestSuite_speed_capabilities.py index 3bda904e..6299b8bf 100644 --- a/tests/TestSuite_speed_capabilities.py +++ b/tests/TestSuite_speed_capabilities.py @@ -16,16 +16,16 @@ class TestSpeedCapabilities(TestCase): """ Run at the start of each test suite. 
""" - self.ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.ports[0]) + self.ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.ports[0]) for port in self.ports: - self.tester.send_expect( - f"ifconfig {self.tester.get_interface(self.tester.get_local_port(port))} mtu 5000", + self.tg_node.send_expect( + f"ifconfig {self.tg_node.get_interface(self.tg_node.get_local_port(port))} mtu 5000", "# ", ) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.vm_env_done = False def test_speed_capabilities(self): @@ -37,7 +37,7 @@ class TestSpeedCapabilities(TestCase): detected_interfaces = [] for port in self.ports: - interface_name = self.tester.get_interface(self.tester.get_local_port(port)) + interface_name = self.tg_node.get_interface(self.tg_node.get_local_port(port)) # Gives the speed in Mb/s interface_speed = self.pmdout.get_port_link_speed(port) @@ -84,7 +84,7 @@ class TestSpeedCapabilities(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.pmdout.start_testpmd( "Default", "--portmask=%s --port-topology=loop" % utils.create_mask(self.ports), @@ -94,10 +94,10 @@ class TestSpeedCapabilities(TestCase): # link up test, to avoid failing further tests if link was down for i in range(ports_num): # sometimes output text messing up testpmd prompt so trimmed prompt - self.dut.send_expect("set link-up port %d" % i, ">") + self.sut_node.send_expect("set link-up port %d" % i, ">") # start ports, to avoid failing further tests if ports are stopped - self.dut.send_expect("port start all", "testpmd> ", 100) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("port start all", "testpmd> ", 100) + self.sut_node.send_expect("quit", "# ") def tear_down_all(self): """ @@ -105,4 +105,4 @@ class TestSpeedCapabilities(TestCase): """ if self.vm_env_done: self.destroy_vm_env() - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_sriov_kvm.py b/tests/TestSuite_sriov_kvm.py index c496b30d..45c82844 100644 --- a/tests/TestSuite_sriov_kvm.py +++ b/tests/TestSuite_sriov_kvm.py @@ -35,8 +35,8 @@ class TestSriovKvm(TestCase): # example: # port_mirror_ref = {0: 1, 1: 3} self.port_mirror_ref = {} - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.vm0 = None self.vm1 = None @@ -49,7 +49,7 @@ class TestSriovKvm(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.setup_2vm_2vf_env_flag = 0 self.setup_2vm_prerequisite_flag = 0 self.vm0_testpmd = None @@ -60,12 +60,12 @@ class TestSriovKvm(TestCase): self.setup_two_vm_common_prerequisite() - def get_stats(self, dut, portid, rx_tx): + def get_stats(self, sut, portid, rx_tx): """ 
Get packets number from port statistic """ - stats = dut.testpmd.get_pmd_stats(portid) + stats = sut.testpmd.get_pmd_stats(portid) if rx_tx == "rx": stats_result = [stats["RX-packets"], stats["RX-missed"], stats["RX-bytes"]] @@ -76,10 +76,10 @@ class TestSriovKvm(TestCase): return stats_result - def parse_ether_ip(self, dut, dut_ports, dest_port, **ether_ip): + def parse_ether_ip(self, sut, sut_ports, dest_port, **ether_ip): """ - dut: which you want to send packet to - dest_port: the port num must be the index of dut.get_ports() + sut: which you want to send packet to + dest_port: the port num must be the index of sut.get_ports() ether_ip: 'ether': { @@ -108,22 +108,22 @@ class TestSriovKvm(TestCase): udp = {} try: - dut_dest_port = dut_ports[dest_port] + sut_dest_port = sut_ports[dest_port] except Exception as e: print(e) - # using api get_local_port() to get the correct tester port. - tester_port = self.tester.get_local_port(dut_dest_port) + # using api get_local_port() to get the correct TG port. 
+ tg_port = self.tg_node.get_local_port(sut_dest_port) if not ether_ip.get("ether"): - ether["dest_mac"] = PmdOutput(dut).get_port_mac(dut_dest_port) - ether["src_mac"] = dut.tester.get_mac(tester_port) + ether["dest_mac"] = PmdOutput(sut).get_port_mac(sut_dest_port) + ether["src_mac"] = sut.tg_node.get_mac(tg_port) else: if not ether_ip["ether"].get("dest_mac"): - ether["dest_mac"] = PmdOutput(dut).get_port_mac(dut_dest_port) + ether["dest_mac"] = PmdOutput(sut).get_port_mac(sut_dest_port) else: ether["dest_mac"] = ether_ip["ether"]["dest_mac"] if not ether_ip["ether"].get("src_mac"): - ether["src_mac"] = dut.tester.get_mac(tester_port) + ether["src_mac"] = sut.tg_node.get_mac(tg_port) else: ether["src_mac"] = ether_ip["ether"]["src_mac"] @@ -170,8 +170,8 @@ class TestSriovKvm(TestCase): def send_packet( self, - dut, - dut_ports, + sut, + sut_ports, dest_port, src_port=False, frame_size=FRAME_SIZE_64, @@ -181,8 +181,8 @@ class TestSriovKvm(TestCase): ): """ Send count packet to portid - dut: which you want to send packet to - dest_port: the port num must be the index of dut.get_ports() + sut: which you want to send packet to + dest_port: the port num must be the index of sut.get_ports() count: 1 or 2 or 3 or ... or 'MANY' if count is 'MANY', then set count=1000, send packets during 5 seconds. 
@@ -219,34 +219,34 @@ class TestSriovKvm(TestCase): raise e gp0rx_pkts, gp0rx_err, gp0rx_bytes = [ - int(_) for _ in self.get_stats(dut, dest_port, "rx") + int(_) for _ in self.get_stats(sut, dest_port, "rx") ] if not src_port: - itf = self.tester.get_interface( - self.dut.ports_map[self.dut_ports[dest_port]] + itf = self.tg_node.get_interface( + self.sut_node.ports_map[self.sut_ports[dest_port]] ) else: itf = src_port - ret_ether_ip = self.parse_ether_ip(dut, dut_ports, dest_port, **ether_ip) + ret_ether_ip = self.parse_ether_ip(sut, sut_ports, dest_port, **ether_ip) pktlen = frame_size - 18 padding = pktlen - 20 start = time.time() while True: - self.tester.scapy_foreground() - self.tester.scapy_append('nutmac="%s"' % ret_ether_ip["ether"]["dest_mac"]) - self.tester.scapy_append('srcmac="%s"' % ret_ether_ip["ether"]["src_mac"]) + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('nutmac="%s"' % ret_ether_ip["ether"]["dest_mac"]) + self.tg_node.scapy_append('srcmac="%s"' % ret_ether_ip["ether"]["src_mac"]) if ether_ip.get("vlan"): - self.tester.scapy_append( + self.tg_node.scapy_append( "vlanvalue=%d" % int(ret_ether_ip["vlan"]["vlan"]) ) - self.tester.scapy_append('destip="%s"' % ret_ether_ip["ip"]["dest_ip"]) - self.tester.scapy_append('srcip="%s"' % ret_ether_ip["ip"]["src_ip"]) - self.tester.scapy_append("destport=%d" % ret_ether_ip["udp"]["dest_port"]) - self.tester.scapy_append("srcport=%d" % ret_ether_ip["udp"]["src_port"]) + self.tg_node.scapy_append('destip="%s"' % ret_ether_ip["ip"]["dest_ip"]) + self.tg_node.scapy_append('srcip="%s"' % ret_ether_ip["ip"]["src_ip"]) + self.tg_node.scapy_append("destport=%d" % ret_ether_ip["udp"]["dest_port"]) + self.tg_node.scapy_append("srcport=%d" % ret_ether_ip["udp"]["src_port"]) if not ret_ether_ip.get("vlan"): send_cmd = ( "sendp([Ether(dst=nutmac, src=srcmac)/" @@ -263,9 +263,9 @@ class TestSriovKvm(TestCase): + 'Raw(load="\x50"*%s)], iface="%s", count=%d)' % (padding, itf, count) ) - 
self.tester.scapy_append(send_cmd) + self.tg_node.scapy_append(send_cmd) - self.tester.scapy_execute() + self.tg_node.scapy_execute() loop += 1 now = time.time() @@ -274,7 +274,7 @@ class TestSriovKvm(TestCase): time.sleep(0.5) p0rx_pkts, p0rx_err, p0rx_bytes = [ - int(_) for _ in self.get_stats(dut, dest_port, "rx") + int(_) for _ in self.get_stats(sut, dest_port, "rx") ] p0rx_pkts -= gp0rx_pkts @@ -290,10 +290,10 @@ class TestSriovKvm(TestCase): return count * loop def setup_2vm_2vf_env(self, driver="igb_uio"): - self.used_dut_port = self.dut_ports[0] + self.used_sut_port = self.sut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] try: for port in self.sriov_vfs_port: @@ -304,16 +304,16 @@ class TestSriovKvm(TestCase): vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} vf1_prop = {"opt_host": self.sriov_vfs_port[1].pci} - for port_id in self.dut_ports: - if port_id == self.used_dut_port: + for port_id in self.sut_ports: + if port_id == self.used_sut_port: continue - port = self.dut.ports_info[port_id]["port"] + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() if driver == "igb_uio": # start testpmd with the two VFs on the host - self.host_testpmd = PmdOutput(self.dut) - eal_param = "-a %s " % self.dut.ports_info[0]["pci"] + self.host_testpmd = PmdOutput(self.sut_node) + eal_param = "-a %s " % self.sut_node.ports_info[0]["pci"] self.host_testpmd.start_testpmd( "1S/2C/2T", "--rxq=4 --txq=4", eal_param=eal_param ) @@ -321,17 +321,17 @@ class TestSriovKvm(TestCase): self.host_testpmd.execute_cmd("start") # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "sriov_kvm") + self.vm0 = VM(self.sut_node, "vm0", "sriov_kvm") self.vm0.set_vm_device(driver=self.vf_assign_method, 
**vf0_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") # set up VM1 ENV - self.vm1 = VM(self.dut, "vm1", "sriov_kvm") + self.vm1 = VM(self.sut_node, "vm1", "sriov_kvm") self.vm1.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_1 = self.vm1.start() - if self.vm_dut_1 is None: + self.vm_sut_1 = self.vm1.start() + if self.vm_sut_1 is None: raise Exception("Set up VM1 ENV failed!") self.setup_2vm_2vf_env_flag = 1 @@ -352,16 +352,16 @@ class TestSriovKvm(TestCase): self.host_testpmd.execute_cmd("quit", "# ") self.host_testpmd = None - self.dut.virt_exit() + self.sut_node.virt_exit() - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] port.bind_driver("igb_uio") - self.used_dut_port = None + self.used_sut_port = None - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver("igb_uio") self.setup_2vm_2vf_env_flag = 0 @@ -423,11 +423,11 @@ class TestSriovKvm(TestCase): if self.vm1_testpmd: self.vm1_testpmd.quit() self.vm1_testpmd = None - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK) - self.vm1_dut_ports = self.vm_dut_1.get_ports("any") - self.vm1_testpmd = PmdOutput(self.vm_dut_1) + self.vm1_sut_ports = self.vm_sut_1.get_ports("any") + self.vm1_testpmd = PmdOutput(self.vm_sut_1) self.vm1_testpmd.start_testpmd(VM_CORES_MASK) 
self.setup_2vm_prerequisite_flag = 1 @@ -440,12 +440,12 @@ class TestSriovKvm(TestCase): def destroy_two_vm_common_prerequisite(self): self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None self.vm0_testpmd = None - self.vm1_dut_ports = None + self.vm1_sut_ports = None - self.dut.virt_exit() + self.sut_node.virt_exit() self.setup_2vm_prerequisite_flag = 0 @@ -453,19 +453,19 @@ class TestSriovKvm(TestCase): if self.setup_2vm_prerequisite_flag == 1: self.vm0_testpmd.execute_cmd("quit", "# ") self.vm1_testpmd.execute_cmd("quit", "# ") - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vm1_dut_ports = self.vm_dut_1.get_ports("any") + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vm1_sut_ports = self.vm_sut_1.get_ports("any") port_id_0 = 0 packet_num = 10 - self.vm1_testpmd = PmdOutput(self.vm_dut_1) + self.vm1_testpmd = PmdOutput(self.vm_sut_1) self.vm1_testpmd.start_testpmd(VM_CORES_MASK) vf1_mac = self.vm1_testpmd.get_port_mac(port_id_0) self.vm1_testpmd.execute_cmd("set fwd mac") self.vm1_testpmd.execute_cmd("set promisc all off") self.vm1_testpmd.execute_cmd("start") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK, "--eth-peer=0,%s" % vf1_mac) vf0_mac = self.vm0_testpmd.get_port_mac(port_id_0) self.vm0_testpmd.execute_cmd("set fwd mac") @@ -477,7 +477,7 @@ class TestSriovKvm(TestCase): time.sleep(2) vm1_start_stats = self.vm1_testpmd.get_pmd_stats(port_id_0) - self.send_packet(self.vm_dut_0, self.vm0_dut_ports, port_id_0, count=packet_num) + self.send_packet(self.vm_sut_0, self.vm0_sut_ports, port_id_0, count=packet_num) vm1_end_stats = self.vm1_testpmd.get_pmd_stats(port_id_0) self.verify( @@ -511,8 +511,8 @@ class TestSriovKvm(TestCase): ether_ip = {} ether_ip["ether"] = {"dest_mac": "%s" % vf_mac} self.send_packet( - self.vm_dut_0, - self.vm0_dut_ports, + self.vm_sut_0, + self.vm0_sut_ports, port_id_0, count=packet_num, 
**ether_ip, @@ -535,7 +535,7 @@ class TestSriovKvm(TestCase): "NIC is [%s], skip this case" % self.nic, ) if self.is_eth_series_nic(700): - self.dut.logger.warning("NIC is [%s], skip this case" % self.nic) + self.sut_node.logger.warning("NIC is [%s], skip this case" % self.nic) return self.setup_2vm_prerequisite_flag = 0 @@ -552,7 +552,7 @@ class TestSriovKvm(TestCase): ether_ip = {} ether_ip["ether"] = {"dest_mac": "%s" % vf_mac} self.send_packet( - self.vm_dut_0, self.vm0_dut_ports, port_id_0, count=packet_num, **ether_ip + self.vm_sut_0, self.vm0_sut_ports, port_id_0, count=packet_num, **ether_ip ) vm0_end_stats = self.vm0_testpmd.get_pmd_stats(port_id_0) @@ -571,8 +571,8 @@ class TestSriovKvm(TestCase): ether_ip = {} ether_ip["ether"] = {"dest_mac": "%s" % vf_mac} self.send_packet( - self.vm_dut_0, - self.vm0_dut_ports, + self.vm_sut_0, + self.vm0_sut_ports, port_id_0, count=packet_num, invert_verify=True, @@ -594,7 +594,7 @@ class TestSriovKvm(TestCase): "NIC is [%s], skip this case" % self.nic, ) if self.is_eth_series_nic(700): - self.dut.logger.warning("NIC is [%s], skip this case" % self.nic) + self.sut_node.logger.warning("NIC is [%s], skip this case" % self.nic) return port_id_0 = 0 @@ -610,8 +610,8 @@ class TestSriovKvm(TestCase): ether_ip = {} ether_ip["ether"] = {"dest_mac": "%s" % vf_mac} self.send_packet( - self.vm_dut_0, - self.vm0_dut_ports, + self.vm_sut_0, + self.vm0_sut_ports, port_id_0, count=packet_num, **ether_ip, @@ -634,7 +634,7 @@ class TestSriovKvm(TestCase): "NIC is [%s], skip this case" % self.nic, ) if self.is_eth_series_nic(700): - self.dut.logger.warning("NIC is [%s], skip this case" % self.nic) + self.sut_node.logger.warning("NIC is [%s], skip this case" % self.nic) return self.setup_2vm_prerequisite_flag = 0 @@ -652,16 +652,16 @@ class TestSriovKvm(TestCase): ether_ip["ether"] = {"dest_mac": "%s" % vf_mac} if switch == "on": self.send_packet( - self.vm_dut_0, - self.vm0_dut_ports, + self.vm_sut_0, + self.vm0_sut_ports, port_id_0, 
count=packet_num, **ether_ip, ) else: self.send_packet( - self.vm_dut_0, - self.vm0_dut_ports, + self.vm_sut_0, + self.vm0_sut_ports, port_id_0, count=packet_num, invert_verify=True, @@ -692,7 +692,7 @@ class TestSriovKvm(TestCase): "NIC is [%s], skip this case" % self.nic, ) if self.is_eth_series_nic(700): - self.dut.logger.warning("NIC is [%s], skip this case" % self.nic) + self.sut_node.logger.warning("NIC is [%s], skip this case" % self.nic) return self.setup_2vm_prerequisite_flag = 0 @@ -707,12 +707,12 @@ class TestSriovKvm(TestCase): vm0_start_stats = self.vm0_testpmd.get_pmd_stats(port_id_0) if switch == "on": self.send_packet( - self.vm_dut_0, self.vm0_dut_ports, port_id_0, count=packet_num + self.vm_sut_0, self.vm0_sut_ports, port_id_0, count=packet_num ) else: self.send_packet( - self.vm_dut_0, - self.vm0_dut_ports, + self.vm_sut_0, + self.vm0_sut_ports, port_id_0, count=packet_num, invert_verify=True, @@ -742,7 +742,7 @@ class TestSriovKvm(TestCase): "NIC is [%s], skip this case" % self.nic, ) if self.is_eth_series_nic(700): - self.dut.logger.warning("NIC is [%s], skip this case" % self.nic) + self.sut_node.logger.warning("NIC is [%s], skip this case" % self.nic) return self.vm0_testpmd.execute_cmd("stop") @@ -760,7 +760,7 @@ class TestSriovKvm(TestCase): vm0_start_stats = self.vm0_testpmd.get_pmd_stats(port_id_0) self.send_packet( - self.vm_dut_0, self.vm0_dut_ports, port_id_0, count=packet_num + self.vm_sut_0, self.vm0_sut_ports, port_id_0, count=packet_num ) vm0_end_stats = self.vm0_testpmd.get_pmd_stats(port_id_0) @@ -787,7 +787,7 @@ class TestSriovKvm(TestCase): "NIC is [%s], skip this case" % self.nic, ) if self.is_eth_series_nic(700): - self.dut.logger.warning("NIC is [%s], skip this case" % self.nic) + self.sut_node.logger.warning("NIC is [%s], skip this case" % self.nic) return port_id_0 = 0 @@ -805,16 +805,16 @@ class TestSriovKvm(TestCase): ether_ip["ether"] = {"dest_mac": "%s" % vf_mac} if switch == "on": self.send_packet( - self.vm_dut_0, - 
self.vm0_dut_ports, + self.vm_sut_0, + self.vm0_sut_ports, port_id_0, count=packet_num, **ether_ip, ) else: self.send_packet( - self.vm_dut_0, - self.vm0_dut_ports, + self.vm_sut_0, + self.vm0_sut_ports, port_id_0, count=packet_num, invert_verify=True, @@ -857,7 +857,7 @@ class TestSriovKvm(TestCase): if getattr(self, "vm1", None): self.vm1.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) diff --git a/tests/TestSuite_stats_checks.py b/tests/TestSuite_stats_checks.py index 331e9c3b..2e6558e4 100644 --- a/tests/TestSuite_stats_checks.py +++ b/tests/TestSuite_stats_checks.py @@ -14,7 +14,7 @@ import struct from time import sleep from typing import Iterator, List, Tuple -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils from framework.pmd_output import PmdOutput from framework.test_case import TestCase @@ -38,19 +38,19 @@ class TestStatsChecks(TestCase): """ An abstraction to remove repeated code throughout the subclasses of this class """ - return self.dut.send_expect(command, "testpmd>") + return self.sut_node.send_expect(command, "testpmd>") def get_mac_address_for_port(self, port_id: int) -> str: - return self.dut.get_mac_address(port_id) + return self.sut_node.get_mac_address(port_id) def send_scapy_packet(self, port_id: int, packet: str): - itf = self.tester.get_interface(port_id) + itf = self.tg_node.get_interface(port_id) - self.tester.scapy_foreground() - mac = self.dut.get_mac_address(port_id) - self.tester.scapy_append(f'dutmac="{mac}"') - self.tester.scapy_append(f'sendp({packet}, iface="{itf}")') - return self.tester.scapy_execute() + self.tg_node.scapy_foreground() + mac = self.sut_node.get_mac_address(port_id) + self.tg_node.scapy_append(f'sutmac="{mac}"') + self.tg_node.scapy_append(f'sendp({packet}, 
iface="{itf}")') + return self.tg_node.scapy_execute() def get_random_ip(self): str_ip = RANDOM_IP_POOL[random.randint(0, len(RANDOM_IP_POOL) - 1)] @@ -74,7 +74,7 @@ class TestStatsChecks(TestCase): padding = pktsize - IP_HEADER_LEN out = self.send_scapy_packet( port_id, - f'Ether(dst=dutmac, src="52:00:00:00:00:00")/IP()/Raw(load="\x50"*{padding})', + f'Ether(dst=sutmac, src="52:00:00:00:00:00")/IP()/Raw(load="\x50"*{padding})', ) return out @@ -84,18 +84,18 @@ class TestStatsChecks(TestCase): port: send pkt port count: pkt count """ - pkt = packet.Packet() - pkt.assign_layers(["ether", "ipv4"]) + scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "ipv4"]) mac = ( - self.pmdout.get_port_mac(port) if if_vf else self.dut.get_mac_address(port) + self.pmdout.get_port_mac(port) if if_vf else self.sut_node.get_mac_address(port) ) for i in range(count): src_ip = self.get_random_ip() - pkt.config_layers([("ether", {"dst": mac}), ("ipv4", {"src": src_ip})]) - pkt.send_pkt( - crb=self.tester, - tx_port=self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + scapy_pkt_builder.config_layers([("ether", {"dst": mac}), ("ipv4", {"src": src_ip})]) + scapy_pkt_builder.send_pkt( + node=self.tg_node, + tx_port=self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ), ) @@ -237,36 +237,36 @@ class TestStatsChecks(TestCase): """ Prerequisite steps for each test suit. 
""" - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.rx_port = self.dut_ports[0] - self.tx_port = self.dut_ports[1] + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.rx_port = self.sut_ports[0] + self.tx_port = self.sut_ports[1] - cores = self.dut.get_core_list("1S/2C/1T") + cores = self.sut_node.get_core_list("1S/2C/1T") self.coremask = utils.create_mask(cores) self.port_mask = utils.create_mask([self.rx_port, self.tx_port]) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) def set_up(self): """ This is to clear up environment before the case run. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ When the case of this test suite finished, the environment should clear up. """ - self.dut.kill_all() + self.sut_node.kill_all() def test_stats_checks(self): self.pmdout.start_testpmd("Default") @@ -284,10 +284,10 @@ class TestStatsChecks(TestCase): self.xstats_check(self.rx_port, self.tx_port) def test_xstats_check_vf(self): - self.dut.generate_sriov_vfs_by_port(self.dut_ports[0], 1, self.kdriver) - self.vf_port = self.dut.ports_info[self.dut_ports[0]]["vfs_port"][0] + self.sut_node.generate_sriov_vfs_by_port(self.sut_ports[0], 1, self.kdriver) + self.vf_port = self.sut_node.ports_info[self.sut_ports[0]]["vfs_port"][0] self.vf_port.bind_driver(driver="vfio-pci") - self.vf_port_pci = self.dut.ports_info[self.dut_ports[0]]["sriov_vfs_pci"][0] + self.vf_port_pci = self.sut_node.ports_info[self.sut_ports[0]]["sriov_vfs_pci"][0] self.pmdout.start_testpmd( "default", "--rxq=4 --txq=4", eal_param="-a %s" % self.vf_port_pci ) diff --git a/tests/TestSuite_telemetry.py b/tests/TestSuite_telemetry.py index 97c1c748..ade003d0 100644 --- a/tests/TestSuite_telemetry.py +++ b/tests/TestSuite_telemetry.py 
@@ -92,7 +92,7 @@ class TestTelemetry(TestCase): query_script = os.path.join(self.output_path, fileName) with open(query_script, "wb") as fp: fp.write(("#! /usr/bin/env python" + os.linesep + script_content).encode()) - self.dut.session.copy_file_to(query_script, self.target_dir) + self.sut_node.session.copy_file_to(query_script, self.target_dir) self.query_tool = ";".join( [ "cd {}".format(self.target_dir), @@ -120,9 +120,9 @@ class TestTelemetry(TestCase): def target_dir(self): # get absolute directory of target source code target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir @@ -140,24 +140,24 @@ class TestTelemetry(TestCase): return output_path def d_console(self, cmds): - return self.execute_cmds(cmds, con_name="dut") + return self.execute_cmds(cmds, con_name="sut") def d_a_console(self, cmds): - return self.execute_cmds(cmds, con_name="dut_alt") + return self.execute_cmds(cmds, con_name="sut_alt") def get_console(self, name): - if name == "dut": - console = self.dut.send_expect - msg_pipe = self.dut.get_session_output - elif name == "dut_alt": - console = self.dut.alt_session.send_expect - msg_pipe = self.dut.alt_session.session.get_output_all + if name == "sut": + console = self.sut_node.send_expect + msg_pipe = self.sut_node.get_session_output + elif name == "sut_alt": + console = self.sut_node.alt_session.send_expect + msg_pipe = self.sut_node.alt_session.session.get_output_all else: msg = "{} not created".format(name) raise Exception(msg) return console, msg_pipe - def execute_cmds(self, cmds, con_name="dut"): + def execute_cmds(self, cmds, con_name="sut"): console, msg_pipe = self.get_console(con_name) if not cmds: return @@ -186,22 +186,22 @@ class TestTelemetry(TestCase): def init_test_binary_files(self): # initialize testpmd self.testpmd_status = "close" - 
self.testpmd = PmdOutput(self.dut) + self.testpmd = PmdOutput(self.sut_node) # prepare telemetry tool self.rename_dpdk_telemetry_tool() def get_allowlist(self, num=1, nic_types=2): self.used_ports = [] - if len(self.dut_ports) < 4 or len(self.nic_grp) < nic_types: - self.used_ports = self.dut_ports + if len(self.sut_ports) < 4 or len(self.nic_grp) < nic_types: + self.used_ports = self.sut_ports return None pci_addrs = [ pci_addr for pci_addrs in list(self.nic_grp.values())[:nic_types] for pci_addr in pci_addrs[:num] ] - for index in self.dut_ports: - info = self.dut.ports_info[index] + for index in self.sut_ports: + info = self.sut_node.ports_info[index] if info["pci"] not in pci_addrs: continue self.used_ports.append(index) @@ -211,8 +211,8 @@ class TestTelemetry(TestCase): def start_telemetry_server(self, allowlist=None): if self.testpmd_status != "close": return None - # use dut first port's socket - socket = self.dut.get_numa_id(0) + # use SUT first port's socket + socket = self.sut_node.get_numa_id(0) config = "Default" eal_option = "--telemetry " + allowlist if allowlist else "--telemetry" output = self.testpmd.start_testpmd(config, eal_param=eal_option, socket=socket) @@ -267,7 +267,7 @@ class TestTelemetry(TestCase): msg = "faile to query metric data" self.verify("Get metrics done" in output, msg) dst_file = os.path.join(self.output_path, json_name) - self.dut.session.copy_file_from(json_file, dst_file) + self.sut_node.session.copy_file_from(json_file, dst_file) msg = "failed to get {}".format(json_name) self.verify(os.path.exists(dst_file), msg) with open(dst_file, "r") as fp: @@ -304,15 +304,15 @@ class TestTelemetry(TestCase): msg = "expected select items not existed" self.verify(all([item in output for item in expected_strs]), msg) cmd = ["1", ":", 10] - output = self.dut_s_session.send_expect(*cmd) - output = self.dut_s_session.session.get_output_all() + output = self.sut_s_session.send_expect(*cmd) + output = 
self.sut_s_session.session.get_output_all() cmd = ["4", "#", 5] - output = self.dut_s_session.send_expect(*cmd) + output = self.sut_s_session.send_expect(*cmd) def start_telemetry_client(self): - self.dut_s_session = self.dut.new_session() + self.sut_s_session = self.sut_node.new_session() dpdk_tool = os.path.join(self.target_dir, "usertools/dpdk-telemetry-client.py") - output = self.dut_s_session.send_expect("python3 " + dpdk_tool, ":", 5) + output = self.sut_s_session.send_expect("python3 " + dpdk_tool, ":", 5) return output def close_telemetry_client(self): @@ -324,7 +324,7 @@ class TestTelemetry(TestCase): process_pid = out.splitlines()[0] cmd = ["kill -TERM {0}".format(process_pid), "# "] self.d_a_console(cmd) - self.dut.close_session(self.dut_s_session) + self.sut_node.close_session(self.sut_s_session) def check_metric_data(self): metric_data = self.get_metric_data() @@ -334,7 +334,7 @@ class TestTelemetry(TestCase): for port_index in metric_data: msg = "<{}> is not the expected port".format(port_index) self.verify(port_index is not None and port_index in port_index_list, msg) - output = self.dut.get_session_output() + output = self.sut_node.get_session_output() self.verify("failed" not in output, output) # set rx/tx configuration by testpmd cmds = [["stop", "testpmd>", 15], ["clear port xstats all", "testpmd>", 15]] @@ -402,7 +402,7 @@ class TestTelemetry(TestCase): def get_ports_by_nic_type(self): nic_grp = {} - for info in self.dut.ports_info: + for info in self.sut_node.ports_info: nic_type = info["type"] if nic_type not in nic_grp: nic_grp[nic_type] = [] @@ -451,15 +451,15 @@ class TestTelemetry(TestCase): try: self.start_telemetry_server() metric_data = self.get_metric_data() - port_index_list = list(range(len(self.dut_ports))) + port_index_list = list(range(len(self.sut_ports))) msg = "haven't get all ports metric data" - self.verify(len(self.dut_ports) == len(metric_data), msg) + self.verify(len(self.sut_ports) == len(metric_data), msg) for port_index 
in metric_data: msg = "<{}> is not the expected port".format(port_index) self.verify( port_index is not None and port_index in port_index_list, msg ) - output = self.dut.get_session_output() + output = self.sut_node.get_session_output() self.verify("failed" not in output, output) self.close_telemetry_server() except Exception as e: @@ -486,7 +486,7 @@ class TestTelemetry(TestCase): ) self.verify(len(list(self.nic_grp.values())[0]) >= 4, msg) try: - self.used_ports = self.dut_ports + self.used_ports = self.sut_ports self.start_telemetry_server() # check telemetry metric data self.check_metric_data() @@ -530,7 +530,7 @@ class TestTelemetry(TestCase): ) try: - self.used_ports = self.dut_ports + self.used_ports = self.sut_ports self.start_telemetry_server() # check telemetry metric data self.check_metric_data() @@ -548,8 +548,8 @@ class TestTelemetry(TestCase): Run before each test suite """ # get ports information - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") self.init_test_binary_files() self.nic_grp = self.get_ports_by_nic_type() self.used_ports = [] diff --git a/tests/TestSuite_testpmd_perf.py b/tests/TestSuite_testpmd_perf.py index 4caa647e..a341cb94 100644 --- a/tests/TestSuite_testpmd_perf.py +++ b/tests/TestSuite_testpmd_perf.py @@ -35,12 +35,12 @@ class TestPmdPerf(TestCase, PerfTestBase): """ self.verify(self.nic in self.supported_nics, "Not required NIC ") # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) + 
socket = self.sut_node.get_numa_id(self.sut_ports[0]) # init common base class parameters PerfTestBase.__init__(self, valports, socket, bin_type=BIN_TYPE.PMD) # preset testing environment @@ -63,7 +63,7 @@ class TestPmdPerf(TestCase, PerfTestBase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_rfc2544_ipv4_lpm(self): diff --git a/tests/TestSuite_timer.py b/tests/TestSuite_timer.py index a0846f43..4bf3c90e 100644 --- a/tests/TestSuite_timer.py +++ b/tests/TestSuite_timer.py @@ -22,8 +22,8 @@ class TestTimer(TestCase): timer prerequisites """ - out = self.dut.build_dpdk_apps("examples/timer") - self.app_timer_path = self.dut.apps_name["timer"] + out = self.sut_node.build_dpdk_apps("examples/timer") + self.app_timer_path = self.sut_node.apps_name["timer"] self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -39,16 +39,16 @@ class TestTimer(TestCase): """ # get the mask for the first core - cores = self.dut.get_core_list("1S/1C/1T") - eal_para = self.dut.create_eal_parameters(cores="1S/1C/1T") + cores = self.sut_node.get_core_list("1S/1C/1T") + eal_para = self.sut_node.create_eal_parameters(cores="1S/1C/1T") # run timer on the background cmdline = "./%s %s " % (self.app_timer_path, eal_para) + " &" - self.dut.send_expect(cmdline, "# ", 1) + self.sut_node.send_expect(cmdline, "# ", 1) time.sleep(15) - out = self.dut.get_session_output() - self.dut.send_expect("killall timer", "# ", 5) + out = self.sut_node.get_session_output() + self.sut_node.send_expect("killall timer", "# ", 5) # verify timer0 utils.regexp(out, r"timer0_cb\(\) on lcore (\d+)") diff --git a/tests/TestSuite_tso.py b/tests/TestSuite_tso.py index 778ba3cc..439c28e0 100644 --- a/tests/TestSuite_tso.py +++ b/tests/TestSuite_tso.py @@ -13,32 +13,32 @@ import re import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen 
import PacketGeneratorHelper +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream DEFAULT_MUT = 1500 TSO_MTU = 9000 class TestTSO(TestCase): - dut_ports = [] + sut_ports = [] def set_up_all(self): """ Run at the start of each test suite. """ # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports(self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) # Verify that enough ports are available - self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing") + self.verify(len(self.sut_ports) >= 2, "Insufficient ports for testing") # Verify that enough threads are available - self.all_cores_mask = utils.create_mask(self.dut.get_core_list("all")) - self.portMask = utils.create_mask([self.dut_ports[0], self.dut_ports[1]]) - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.all_cores_mask = utils.create_mask(self.sut_node.get_core_list("all")) + self.portMask = utils.create_mask([self.sut_ports[0], self.sut_ports[1]]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.loading_sizes = [128, 800, 801, 1700, 2500] self.rxfreet_values = [0, 8, 16, 32, 64, 128] @@ -50,16 +50,16 @@ class TestTSO(TestCase): self.table_header.append("%s Mpps" % test_cycle["cores"]) self.table_header.append("% linerate") - self.eal_param = self.dut.create_eal_parameters( - cores="1S/1C/2T", socket=self.ports_socket, ports=self.dut_ports + self.eal_param = self.sut_node.create_eal_parameters( + cores="1S/1C/2T", socket=self.ports_socket, ports=self.sut_ports ) self.headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["tcp"] - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu %s" % ( - self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.tg_node.get_interface( + 
self.tg_node.get_local_port(self.sut_ports[0]) ), TSO_MTU, ), @@ -72,8 +72,8 @@ class TestTSO(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ @@ -83,8 +83,8 @@ class TestTSO(TestCase): def tcpdump_start_sniffing(self, ifaces=[]): """ - Starts tcpdump in the background to sniff the tester interface where - the packets are transmitted to and from the self.dut. + Starts tcpdump in the background to sniff the TG interface where + the packets are transmitted to and from the self.sut_node. All the captured packets are going to be stored in a file for a post-analysis. """ @@ -94,16 +94,16 @@ class TestTSO(TestCase): "tcpdump -w /tmp/tcpdump_{0}.pcap -i {0} 2>tcpdump_{0}.out &" ).format(iface) del_cmd = ("rm -f /tmp/tcpdump_{0}.pcap").format(iface) - self.tester.send_expect(del_cmd, "#") - self.tester.send_expect(command, "#") + self.tg_node.send_expect(del_cmd, "#") + self.tg_node.send_expect(command, "#") def tcpdump_stop_sniff(self): """ Stops the tcpdump process running in the background. 
""" - self.tester.send_expect("killall tcpdump", "#") + self.tg_node.send_expect("killall tcpdump", "#") time.sleep(1) - self.tester.send_expect('echo "Cleaning buffer"', "#") + self.tg_node.send_expect('echo "Cleaning buffer"', "#") time.sleep(1) def tcpdump_command(self, command): @@ -111,13 +111,13 @@ class TestTSO(TestCase): Sends a tcpdump related command and returns an integer from the output """ - result = self.tester.send_expect(command, "#") + result = self.tg_node.send_expect(command, "#") return int(result.strip()) def number_of_packets(self, iface): """ By reading the file generated by tcpdump it counts how many packets were - forwarded by the sample app and received in the self.tester. The sample app + forwarded by the sample app and received in the self.tg_node. The sample app will add a known MAC address for the test to look for. """ @@ -131,7 +131,7 @@ class TestTSO(TestCase): """ Execute scanner to return results """ - scanner_result = self.tester.send_expect(scanner, "#", 60) + scanner_result = self.tg_node.send_expect(scanner, "#", 60) fially_result = re.findall(r"length( \d+)", scanner_result) return list(fially_result) @@ -143,10 +143,10 @@ class TestTSO(TestCase): return self.tcpdump_scanner(scanner.format(**locals())) def get_chksum_value_and_verify(self, dump_pcap, save_file, Nic_list): - packet = Packet() - self.pks = packet.read_pcapfile(dump_pcap, self.tester) + scapy_pkt_builder = ScapyPacketBuilder() + self.pks = scapy_pkt_builder.read_pcapfile(dump_pcap, self.tg_node) for i in range(len(self.pks)): - self.pks = packet.read_pcapfile(dump_pcap, self.tester) + self.pks = scapy_pkt_builder.read_pcapfile(dump_pcap, self.tg_node) pks = self.pks[i] out = pks.show chksum_list = re.findall(r"chksum=(0x\w+)", str(out)) @@ -158,11 +158,11 @@ class TestTSO(TestCase): elif "GRE" in str(out): pks["GRE"]["IP"].chksum = None pks["GRE"]["TCP"].chksum = None - packet.save_pcapfile(self.tester, filename=save_file) - self.pks1 = 
packet.read_pcapfile(save_file, self.tester) + scapy_pkt_builder.save_pcapfile(self.tg_node, filename=save_file) + self.pks1 = scapy_pkt_builder.read_pcapfile(save_file, self.tg_node) out1 = self.pks1[i].show chksum_list1 = re.findall(r"chksum=(0x\w+)", str(out1)) - self.tester.send_expect("rm -rf %s" % save_file, "#") + self.tg_node.send_expect("rm -rf %s" % save_file, "#") if self.nic in Nic_list and "VXLAN" in str(out): self.verify( chksum_list[0] == chksum_list1[0] @@ -180,24 +180,24 @@ class TestTSO(TestCase): """ TSO IPv4 TCP, IPv6 TCP, VXLan testing """ - tx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + tx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - rx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[1]) + rx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[1]) ) - mac = self.dut.get_mac_address(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/2C/2T") + mac = self.sut_node.get_mac_address(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/2C/2T") self.verify(cores is not None, "Insufficient cores for speed testing") self.coreMask = utils.create_mask(cores) - self.tester.send_expect( + self.tg_node.send_expect( "ethtool -K %s rx off tx off tso off gso off gro off lro off" % tx_interface, "# ", ) - self.tester.send_expect("ip l set %s up" % tx_interface, "# ") + self.tg_node.send_expect("ip l set %s up" % tx_interface, "# ") if self.nic in ["cavium_a063", "cavium_a064"]: cmd = ( @@ -210,57 +210,57 @@ class TestTSO(TestCase): % (self.path, self.eal_param, self.portMask, TSO_MTU) ) - self.dut.send_expect(cmd, "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("port stop all", "testpmd> ", 120) - self.dut.send_expect("csum set ip hw %d" % self.dut_ports[0], "testpmd> ", 120) - self.dut.send_expect("csum set udp hw %d" % self.dut_ports[0], 
"testpmd> ", 120) - self.dut.send_expect("csum set tcp hw %d" % self.dut_ports[0], "testpmd> ", 120) + self.sut_node.send_expect(cmd, "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("port stop all", "testpmd> ", 120) + self.sut_node.send_expect("csum set ip hw %d" % self.sut_ports[0], "testpmd> ", 120) + self.sut_node.send_expect("csum set udp hw %d" % self.sut_ports[0], "testpmd> ", 120) + self.sut_node.send_expect("csum set tcp hw %d" % self.sut_ports[0], "testpmd> ", 120) if self.nic not in ["cavium_a063", "cavium_a064"]: - self.dut.send_expect( - "csum set sctp hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set sctp hw %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set outer-ip hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set outer-ip hw %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum parse-tunnel on %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum parse-tunnel on %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect("csum set ip hw %d" % self.dut_ports[1], "testpmd> ", 120) - self.dut.send_expect("csum set udp hw %d" % self.dut_ports[1], "testpmd> ", 120) - self.dut.send_expect("csum set tcp hw %d" % self.dut_ports[1], "testpmd> ", 120) + self.sut_node.send_expect("csum set ip hw %d" % self.sut_ports[1], "testpmd> ", 120) + self.sut_node.send_expect("csum set udp hw %d" % self.sut_ports[1], "testpmd> ", 120) + self.sut_node.send_expect("csum set tcp hw %d" % self.sut_ports[1], "testpmd> ", 120) if self.nic not in ["cavium_a063", "cavium_a064"]: - self.dut.send_expect( - "csum set sctp hw %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set sctp hw %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set outer-ip hw %d" % self.dut_ports[1], "testpmd> ", 120 + 
self.sut_node.send_expect( + "csum set outer-ip hw %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "csum parse-tunnel on %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum parse-tunnel on %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect("tso set 800 %d" % self.dut_ports[1], "testpmd> ", 120) - self.dut.send_expect("set fwd csum", "testpmd> ", 120) - self.dut.send_expect("port start all", "testpmd> ", 120) - self.dut.send_expect("set promisc all off", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("tso set 800 %d" % self.sut_ports[1], "testpmd> ", 120) + self.sut_node.send_expect("set fwd csum", "testpmd> ", 120) + self.sut_node.send_expect("port start all", "testpmd> ", 120) + self.sut_node.send_expect("set promisc all off", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ") - self.tester.scapy_foreground() + self.tg_node.scapy_foreground() time.sleep(5) for loading_size in self.loading_sizes: # IPv4 tcp test self.tcpdump_start_sniffing([tx_interface, rx_interface]) - out = self.dut.send_expect("clear port stats all", "testpmd> ", 120) - self.tester.scapy_append( + out = self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) + self.tg_node.scapy_append( 'sendp([Ether(dst="%s",src="52:00:00:00:00:00")/IP(src="192.168.1.1",dst="192.168.1.2")/TCP(sport=1021,dport=1021)/("X"*%s)], iface="%s")' % (mac, loading_size, tx_interface) ) - out = self.tester.scapy_execute() - out = self.dut.send_expect("show port stats all", "testpmd> ", 120) + out = self.tg_node.scapy_execute() + out = self.sut_node.send_expect("show port stats all", "testpmd> ", 120) print(out) self.tcpdump_stop_sniff() rx_stats = self.number_of_packets(rx_interface) @@ -288,13 +288,13 @@ class TestTSO(TestCase): for loading_size in self.loading_sizes: # IPv6 tcp test self.tcpdump_start_sniffing([tx_interface, rx_interface]) - out = self.dut.send_expect("clear port 
stats all", "testpmd> ", 120) - self.tester.scapy_append( + out = self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) + self.tg_node.scapy_append( 'sendp([Ether(dst="%s", src="52:00:00:00:00:00")/IPv6(src="FE80:0:0:0:200:1FF:FE00:200", dst="3555:5555:6666:6666:7777:7777:8888:8888")/TCP(sport=1021,dport=1021)/("X"*%s)], iface="%s")' % (mac, loading_size, tx_interface) ) - out = self.tester.scapy_execute() - out = self.dut.send_expect("show port stats all", "testpmd> ", 120) + out = self.tg_node.scapy_execute() + out = self.sut_node.send_expect("show port stats all", "testpmd> ", 120) print(out) self.tcpdump_stop_sniff() rx_stats = self.number_of_packets(rx_interface) @@ -323,11 +323,11 @@ class TestTSO(TestCase): """ TSO IPv4 TCP, IPv6 TCP, VXLan testing """ - tx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + tx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - rx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[1]) + rx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[1]) ) Nic_list = [ @@ -339,90 +339,90 @@ class TestTSO(TestCase): save_file = "/tmp/save.pcap" dump_pcap = "/tmp/tcpdump_%s.pcap" % rx_interface - mac = self.dut.get_mac_address(self.dut_ports[0]) + mac = self.sut_node.get_mac_address(self.sut_ports[0]) - cores = self.dut.get_core_list("1S/2C/2T") + cores = self.sut_node.get_core_list("1S/2C/2T") self.verify(cores is not None, "Insufficient cores for speed testing") self.coreMask = utils.create_mask(cores) - self.tester.send_expect( + self.tg_node.send_expect( "ethtool -K %s rx off tx off tso off gso off gro off lro off" % tx_interface, "# ", ) - self.tester.send_expect("ip l set %s up" % tx_interface, "# ") + self.tg_node.send_expect("ip l set %s up" % tx_interface, "# ") cmd = ( "%s %s -- -i --rxd=512 --txd=512 --burst=32 --rxfreet=64 --mbcache=128 --portmask=%s --max-pkt-len=%s 
--txpt=36 --txht=0 --txwt=0 --txfreet=32 --txrst=32 " % (self.path, self.eal_param, self.portMask, TSO_MTU) ) - self.dut.send_expect(cmd, "testpmd> ", 120) - self.dut.send_expect("set verbose 1", "testpmd> ", 120) - self.dut.send_expect("port stop all", "testpmd> ", 120) - self.dut.send_expect("csum set ip hw %d" % self.dut_ports[0], "testpmd> ", 120) - self.dut.send_expect("csum set udp hw %d" % self.dut_ports[0], "testpmd> ", 120) - self.dut.send_expect("csum set tcp hw %d" % self.dut_ports[0], "testpmd> ", 120) - self.dut.send_expect( - "csum set sctp hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect(cmd, "testpmd> ", 120) + self.sut_node.send_expect("set verbose 1", "testpmd> ", 120) + self.sut_node.send_expect("port stop all", "testpmd> ", 120) + self.sut_node.send_expect("csum set ip hw %d" % self.sut_ports[0], "testpmd> ", 120) + self.sut_node.send_expect("csum set udp hw %d" % self.sut_ports[0], "testpmd> ", 120) + self.sut_node.send_expect("csum set tcp hw %d" % self.sut_ports[0], "testpmd> ", 120) + self.sut_node.send_expect( + "csum set sctp hw %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set outer-ip hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set outer-ip hw %d" % self.sut_ports[0], "testpmd> ", 120 ) if self.nic in Nic_list: self.logger.warning( "Warning: Intel® Ethernet 700 Series not support outer udp." 
) else: - self.dut.send_expect( - "csum set outer-udp hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set outer-udp hw %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum parse-tunnel on %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum parse-tunnel on %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect("csum set ip hw %d" % self.dut_ports[1], "testpmd> ", 120) - self.dut.send_expect("csum set udp hw %d" % self.dut_ports[1], "testpmd> ", 120) - self.dut.send_expect("csum set tcp hw %d" % self.dut_ports[1], "testpmd> ", 120) - self.dut.send_expect( - "csum set sctp hw %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect("csum set ip hw %d" % self.sut_ports[1], "testpmd> ", 120) + self.sut_node.send_expect("csum set udp hw %d" % self.sut_ports[1], "testpmd> ", 120) + self.sut_node.send_expect("csum set tcp hw %d" % self.sut_ports[1], "testpmd> ", 120) + self.sut_node.send_expect( + "csum set sctp hw %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set outer-ip hw %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set outer-ip hw %d" % self.sut_ports[1], "testpmd> ", 120 ) if self.nic in Nic_list: self.logger.warning( "Warning: Intel® Ethernet 700 Series not support outer udp." 
) else: - self.dut.send_expect( - "csum set outer-udp hw %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set outer-udp hw %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "csum parse-tunnel on %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum parse-tunnel on %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "tunnel_tso set 800 %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "tunnel_tso set 800 %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 120) - self.dut.send_expect("set fwd csum", "testpmd> ", 120) - self.dut.send_expect("port start all", "testpmd> ", 120) - self.dut.send_expect("set promisc all off", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("rx_vxlan_port add 4789 0", "testpmd> ", 120) + self.sut_node.send_expect("set fwd csum", "testpmd> ", 120) + self.sut_node.send_expect("port start all", "testpmd> ", 120) + self.sut_node.send_expect("set promisc all off", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ") - self.tester.scapy_foreground() + self.tg_node.scapy_foreground() time.sleep(5) for loading_size in self.loading_sizes: # Vxlan test self.tcpdump_start_sniffing([tx_interface, rx_interface]) - out = self.dut.send_expect("clear port stats all", "testpmd> ", 120) - self.tester.scapy_append( + out = self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) + self.tg_node.scapy_append( 'sendp([Ether(dst="%s",src="52:00:00:00:00:00")/IP(src="192.168.1.1",dst="192.168.1.2")/UDP(sport=1021,dport=4789)/VXLAN()/Ether(dst="%s",src="52:00:00:00:00:00")/IP(src="192.168.1.1",dst="192.168.1.2")/TCP(sport=1021,dport=1021)/("X"*%s)], iface="%s")' % (mac, mac, loading_size, tx_interface) ) - out = self.tester.scapy_execute() - out = self.dut.send_expect("show port stats all", "testpmd> ", 120) + out = 
self.tg_node.scapy_execute() + out = self.sut_node.send_expect("show port stats all", "testpmd> ", 120) print(out) self.tcpdump_stop_sniff() rx_stats = self.number_of_packets(rx_interface) @@ -451,13 +451,13 @@ class TestTSO(TestCase): for loading_size in self.loading_sizes: # Nvgre test self.tcpdump_start_sniffing([tx_interface, rx_interface]) - out = self.dut.send_expect("clear port stats all", "testpmd> ", 120) - self.tester.scapy_append( + out = self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) + self.tg_node.scapy_append( 'sendp([Ether(dst="%s",src="52:00:00:00:00:00")/IP(src="192.168.1.1",dst="192.168.1.2",proto=47)/GRE(key_present=1,proto=0x6558,key=0x00001000)/Ether(dst="%s",src="52:00:00:00:00:00")/IP(src="192.168.1.1",dst="192.168.1.2")/TCP(sport=1021,dport=1021)/("X"*%s)], iface="%s")' % (mac, mac, loading_size, tx_interface) ) - out = self.tester.scapy_execute() - out = self.dut.send_expect("show port stats all", "testpmd> ", 120) + out = self.tg_node.scapy_execute() + out = self.sut_node.send_expect("show port stats all", "testpmd> ", 120) print(out) self.tcpdump_stop_sniff() rx_stats = self.number_of_packets(rx_interface) @@ -494,7 +494,7 @@ class TestTSO(TestCase): # run testpmd for each core config for test_cycle in self.test_cycles: core_config = test_cycle["cores"] - cores = self.dut.get_core_list(core_config, socket=self.ports_socket) + cores = self.sut_node.get_core_list(core_config, socket=self.ports_socket) self.coreMask = utils.create_mask(cores) if len(cores) > 2: queues = len(cores) // 2 @@ -510,49 +510,49 @@ class TestTSO(TestCase): self.rst_report(info, annex=True) self.rst_report(command_line + "\n\n", frame=True, annex=True) - self.dut.send_expect(command_line, "testpmd> ", 120) - self.dut.send_expect("port stop all", "testpmd> ", 120) - self.dut.send_expect( - "csum set ip hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect(command_line, "testpmd> ", 120) + self.sut_node.send_expect("port stop 
all", "testpmd> ", 120) + self.sut_node.send_expect( + "csum set ip hw %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set udp hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set udp hw %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set tcp hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set tcp hw %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set sctp hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set sctp hw %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set outer-ip hw %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set outer-ip hw %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum parse-tunnel on %d" % self.dut_ports[0], "testpmd> ", 120 + self.sut_node.send_expect( + "csum parse-tunnel on %d" % self.sut_ports[0], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set ip hw %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set ip hw %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set udp hw %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set udp hw %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set tcp hw %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set tcp hw %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set sctp hw %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set sctp hw %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "csum set outer-ip hw %d" % self.dut_ports[1], "testpmd> ", 120 + self.sut_node.send_expect( + "csum set outer-ip hw %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect( - "csum parse-tunnel on %d" % self.dut_ports[1], "testpmd> 
", 120 + self.sut_node.send_expect( + "csum parse-tunnel on %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.dut.send_expect("tso set 800 %d" % self.dut_ports[1], "testpmd> ", 120) - self.dut.send_expect("set fwd csum", "testpmd> ", 120) - self.dut.send_expect("port start all", "testpmd> ", 120) - self.dut.send_expect("set promisc all off", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("tso set 800 %d" % self.sut_ports[1], "testpmd> ", 120) + self.sut_node.send_expect("set fwd csum", "testpmd> ", 120) + self.sut_node.send_expect("port start all", "testpmd> ", 120) + self.sut_node.send_expect("set promisc all off", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ") for loading_size in self.loading_sizes: frame_size = loading_size + self.headers_size wirespeed = self.wirespeed(self.nic, frame_size, 2) @@ -561,36 +561,36 @@ class TestTSO(TestCase): self.logger.info("Running with frame size %d " % frame_size) payload_size = frame_size - self.headers_size for _port in range(2): - mac = self.dut.get_mac_address(self.dut_ports[_port]) + mac = self.sut_node.get_mac_address(self.sut_ports[_port]) pcap = os.sep.join([self.output_path, "dts{0}.pcap".format(_port)]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [Ether(dst="%s",src="52:00:00:00:00:01")/IP(src="192.168.1.1",dst="192.168.1.2")/TCP(sport=1021,dport=1021)/("X"*%d)])' % (pcap, mac, payload_size) ) tgen_input.append( ( - self.tester.get_local_port(self.dut_ports[_port]), - self.tester.get_local_port(self.dut_ports[1 - _port]), + self.tg_node.get_local_port(self.sut_ports[_port]), + self.tg_node.get_local_port(self.sut_ports[1 - _port]), "%s" % pcap, ) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = 
self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) pps /= 1000000.0 test_cycle["Mpps"][loading_size] = pps test_cycle["pct"][loading_size] = pps * 100 // wirespeed - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) time.sleep(5) for n in range(len(self.test_cycles)): @@ -615,19 +615,19 @@ class TestTSO(TestCase): """ Run after each test case. """ - self.dut.send_expect("quit", "# ") - self.dut.kill_all() + self.sut_node.send_expect("quit", "# ") + self.sut_node.kill_all() time.sleep(2) def tear_down_all(self): """ Run after each test suite. """ - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu %s" % ( - self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ), DEFAULT_MUT, ), diff --git a/tests/TestSuite_tx_preparation.py b/tests/TestSuite_tx_preparation.py index 9b70699c..3a74634e 100644 --- a/tests/TestSuite_tx_preparation.py +++ b/tests/TestSuite_tx_preparation.py @@ -15,9 +15,8 @@ import re import subprocess import time -import framework.dut as dut +import framework.sut_node as sut from framework.config import PortConf -from framework.packet import Packet from framework.pmd_output import PmdOutput from framework.settings import FOLDERS from framework.test_case import TestCase @@ -42,51 +41,51 @@ class TestTX_preparation(TestCase): """ Run at the start of each test suite. 
""" - self.ports = self.dut.get_ports(self.nic) + self.ports = self.sut_node.get_ports(self.nic) self.verify(len(self.ports) >= 1, "Insufficient number of ports.") - self.used_dut_port = self.ports[0] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) - out = self.tester.send_expect( + self.used_sut_port = self.ports[0] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) + out = self.tg_node.send_expect( "ethtool -K %s rx off tx off tso off gso\ off gro off lro off" - % self.tester_intf, + % self.tg_intf, "#", ) if "Cannot change large-receive-offload" in out: - self.tester.send_expect( + self.tg_node.send_expect( "ethtool -K %s rx off tx off tso off gso\ off gro off" - % self.tester_intf, + % self.tg_intf, "#", ) - self.tester.send_expect("ifconfig %s mtu %s" % (self.tester_intf, Max_mtu), "#") + self.tg_node.send_expect("ifconfig %s mtu %s" % (self.tg_intf, Max_mtu), "#") def set_up(self): """ Run before each test case. 
""" - self.dut_testpmd = PmdOutput(self.dut) + self.sut_testpmd = PmdOutput(self.sut_node) # use one port test the case - self.dut_testpmd.start_testpmd( + self.sut_testpmd.start_testpmd( "Default", " --portmask=1 --port-topology=chained --max-pkt-len=%s --tx-offloads=0x8000" % Max_mtu, ) - self.dmac = self.dut_testpmd.get_port_mac(0) - self.dut_testpmd.execute_cmd("set fwd csum") - self.dut_testpmd.execute_cmd("set verbose 1") + self.dmac = self.sut_testpmd.get_port_mac(0) + self.sut_testpmd.execute_cmd("set fwd csum") + self.sut_testpmd.execute_cmd("set verbose 1") # enable ip/udp/tcp hardware checksum - self.dut_testpmd.execute_cmd("port stop all") - self.dut_testpmd.execute_cmd("csum set ip hw 0") - self.dut_testpmd.execute_cmd("csum set tcp hw 0") - self.dut_testpmd.execute_cmd("csum set udp hw 0") + self.sut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("csum set ip hw 0") + self.sut_testpmd.execute_cmd("csum set tcp hw 0") + self.sut_testpmd.execute_cmd("csum set udp hw 0") def start_tcpdump(self, rxItf): - # only sniff form dut packet and filter lldp packet + # only sniff form SUT packet and filter lldp packet param = "ether[12:2]!=0x88cc and ether src %s" % self.dmac - self.tester.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") + self.tg_node.send_expect( "tcpdump %s -i %s -n -e -vv -w\ ./getPackageByTcpdump.cap 2> /dev/null& " % (param, rxItf), @@ -94,8 +93,8 @@ class TestTX_preparation(TestCase): ) def get_tcpdump_package(self): - self.tester.send_expect("killall tcpdump", "#") - return self.tester.send_expect( + self.tg_node.send_expect("killall tcpdump", "#") + return self.tg_node.send_expect( "tcpdump -nn -e -v -r ./getPackageByTcpdump.cap", "#" ) @@ -103,7 +102,7 @@ class TestTX_preparation(TestCase): """ Send packet to portid and output """ - self.pmd_output = PmdOutput(self.dut) + self.pmd_output = PmdOutput(self.sut_node) res = 
self.pmd_output.wait_link_status_up("all", 30) self.verify(res is True, "there have port link is down") @@ -139,12 +138,12 @@ class TestTX_preparation(TestCase): } for packet_type in list(pkts.keys()): - self.start_tcpdump(self.tester_intf) - self.tester.scapy_append( + self.start_tcpdump(self.tg_intf) + self.tg_node.scapy_append( 'sendp([%s], iface="%s", count=%d)' - % (pkts[packet_type], self.tester_intf, count) + % (pkts[packet_type], self.tg_intf, count) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() out = self.get_tcpdump_package() if packet_type == "IPv6/cksum UDP": self.verify( @@ -181,9 +180,9 @@ class TestTX_preparation(TestCase): """ ftag functional test """ - self.dut_testpmd.execute_cmd("tso set 0 0") - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("tso set 0 0") + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("start") self.send_packet_verify() @@ -191,9 +190,9 @@ class TestTX_preparation(TestCase): """ ftag functional test """ - self.dut_testpmd.execute_cmd("tso set %s 0" % TSO_value) - self.dut_testpmd.execute_cmd("port start all") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("tso set %s 0" % TSO_value) + self.sut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("start") self.send_packet_verify(1) @@ -201,14 +200,14 @@ class TestTX_preparation(TestCase): """ Run after each test case. """ - self.dut_testpmd.execute_cmd("stop") - self.dut_testpmd.quit() + self.sut_testpmd.execute_cmd("stop") + self.sut_testpmd.quit() def tear_down_all(self): """ Run after each test suite. 
""" - self.tester.send_expect( - "ifconfig %s mtu %s" % (self.tester_intf, Normal_mtu), "#" + self.tg_node.send_expect( + "ifconfig %s mtu %s" % (self.tg_intf, Normal_mtu), "#" ) - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_uni_pkt.py b/tests/TestSuite_uni_pkt.py index 26cb7556..46427bcb 100644 --- a/tests/TestSuite_uni_pkt.py +++ b/tests/TestSuite_uni_pkt.py @@ -20,8 +20,8 @@ import time import framework.utils as utils from framework.exception import VerifyFailure -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -30,20 +30,20 @@ class TestUniPacket(TestCase): """ Run at the start of each test suite. """ - ports = self.dut.get_ports() + ports = self.sut_node.get_ports() self.verify(len(ports) >= 2, "Insufficient ports for testing") - valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] + valports = [_ for _ in ports if self.tg_node.get_local_port(_) != -1] # start testpmd - self.dut_port = valports[0] - socket_id = self.dut.get_numa_id(self.dut_port) - tester_port = self.tester.get_local_port(self.dut_port) - self.tester_iface = self.tester.get_interface(tester_port) - self.pmd = PmdOutput(self.dut) + self.sut_port = valports[0] + socket_id = self.sut_node.get_numa_id(self.sut_port) + tg_port = self.tg_node.get_local_port(self.sut_port) + self.tg_iface = self.tg_node.get_interface(tg_port) + self.pmd = PmdOutput(self.sut_node) self.pmd.start_testpmd(socket=socket_id) - self.dut.send_expect("set fwd rxonly", "testpmd>") - self.dut.send_expect("set verbose 1", "testpmd>") - self.dut.send_expect("start", "testpmd>") - self.pmd.wait_link_status_up(port_id=self.dut_port) + self.sut_node.send_expect("set fwd rxonly", "testpmd>") + self.sut_node.send_expect("set verbose 1", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") + self.pmd.wait_link_status_up(port_id=self.sut_port) 
def set_up(self): """ @@ -56,9 +56,9 @@ class TestUniPacket(TestCase): time.sleep(1) for pkt_type in list(pkt_types.keys()): pkt_names = pkt_types[pkt_type] - pkt = Packet(pkt_type=pkt_type) - pkt.send_pkt(self.tester, tx_port=self.tester_iface, count=4) - out = self.dut.get_session_output(timeout=2) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_iface, count=4) + out = self.sut_node.get_session_output(timeout=2) for pkt_layer_name in pkt_names: if pkt_layer_name not in out: print((utils.RED("Fail to detect %s" % pkt_layer_name))) @@ -94,9 +94,9 @@ class TestUniPacket(TestCase): # Change this code end for DPDK-15109, the share code doest not support TIMESYNC, once supported then will enable for l2_type in list(self.L2_types.keys()): pkt_name = self.L2_types[l2_type] - pkt = Packet(pkt_type=l2_type) - pkt.send_pkt(self.tester, tx_port=self.tester_iface) - out = self.dut.get_session_output(timeout=2) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=l2_type) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_iface) + out = self.sut_node.get_session_output(timeout=2) if pkt_name in out: print((utils.GREEN("Detected L2 %s successfully" % l2_type))) else: @@ -587,14 +587,14 @@ class TestUniPacket(TestCase): def run_nvgre_cope(self, pkt_nvgre): time.sleep(1) for pkts in pkt_nvgre: - pkt = Packet() - pkt.assign_layers(pkts[2]) + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(pkts[2]) if "inner_icmp" in pkts[2]: - pkt.config_layers([("ipv6", {"nh": 47}), ("inner_ipv6", {"nh": 58})]) + scapy_pkt_builder.config_layers([("ipv6", {"nh": 47}), ("inner_ipv6", {"nh": 58})]) else: - pkt.config_layers([("ipv6", {"nh": 47}), ("inner_ipv6", {"nh": 132})]) - pkt.send_pkt(self.tester, tx_port=self.tester_iface) - out = self.dut.get_session_output(timeout=2) + scapy_pkt_builder.config_layers([("ipv6", {"nh": 47}), ("inner_ipv6", {"nh": 132})]) + 
scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_iface) + out = self.sut_node.get_session_output(timeout=2) for pkt_layer_name in pkts[1]: if pkt_layer_name not in out: print((utils.RED("Fail to detect %s" % pkt_layer_name))) @@ -648,10 +648,10 @@ class TestUniPacket(TestCase): "Vxlan tunnel packet type detect only support by Intel® Ethernet 700 Series", ) - self.dut.send_expect("rx_vxlan_port add 4789 0", "testpmd>", 10) - self.dut.send_expect("set fwd rxonly", "testpmd>") - self.dut.send_expect("set verbose 1", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("rx_vxlan_port add 4789 0", "testpmd>", 10) + self.sut_node.send_expect("set fwd rxonly", "testpmd>") + self.sut_node.send_expect("set verbose 1", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") vxlan_ipv4_default_packet_type = [ "L2_ETHER", "L3_IPV4_EXT_UNKNOWN", @@ -742,9 +742,9 @@ class TestUniPacket(TestCase): } for packet in nsh_packets: - pk = Packet(nsh_packets[packet]) - pk.send_pkt(self.tester, self.tester_iface) - out = self.dut.get_session_output(timeout=2) + scapy_pkt_builder = ScapyPacketBuilder(nsh_packets[packet]) + scapy_pkt_builder.send_pkt(self.tg_node, self.tg_iface) + out = self.sut_node.get_session_output(timeout=2) self.verify( nsh_detect_message[packet] in out, "Packet Detection Error for : %s" % packet, @@ -763,5 +763,5 @@ class TestUniPacket(TestCase): Run after each test suite. Nothing to do. """ - self.dut.kill_all() + self.sut_node.kill_all() pass diff --git a/tests/TestSuite_unit_tests_cmdline.py b/tests/TestSuite_unit_tests_cmdline.py index de78ca6d..ed6eaf43 100644 --- a/tests/TestSuite_unit_tests_cmdline.py +++ b/tests/TestSuite_unit_tests_cmdline.py @@ -31,7 +31,7 @@ class TestUnitTestsCmdline(TestCase): Run at the start of each test suite. """ # icc compilation cost long long time. 
- self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -43,11 +43,11 @@ class TestUnitTestsCmdline(TestCase): """ Run cmdline autotests in RTE command line. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("cmdline_autotest", "RTE>>", 60) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("cmdline_autotest", "RTE>>", 60) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") return "SUCCESS" diff --git a/tests/TestSuite_unit_tests_crc.py b/tests/TestSuite_unit_tests_crc.py index 33d33f7e..c2ee7b4d 100644 --- a/tests/TestSuite_unit_tests_crc.py +++ b/tests/TestSuite_unit_tests_crc.py @@ -29,7 +29,7 @@ class TestUnitTestsCrc(TestCase): """ Run at the start of each test suite. """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -42,11 +42,11 @@ class TestUnitTestsCrc(TestCase): Run crc autotests in RTE command line. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("crc_autotest", "RTE>>", 60) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("crc_autotest", "RTE>>", 60) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") return "SUCCESS" diff --git a/tests/TestSuite_unit_tests_cryptodev_func.py b/tests/TestSuite_unit_tests_cryptodev_func.py index 816109e8..266604d3 100644 --- a/tests/TestSuite_unit_tests_cryptodev_func.py +++ b/tests/TestSuite_unit_tests_cryptodev_func.py @@ -11,14 +11,14 @@ from framework.test_case import TestCase class UnitTestsCryptodev(TestCase): def set_up_all(self): - self._app_path = self.dut.apps_name["test"] + self._app_path = self.sut_node.apps_name["test"] cc.bind_qat_device(self, "vfio-pci") def set_up(self): pass def tear_down(self): - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): pass @@ -111,19 +111,19 @@ class UnitTestsCryptodev(TestCase): w = cc.get_qat_devices(self, num=1) self.logger.info("STEP_TEST: " + testsuite) - self.dut.send_expect("dmesg -C", "# ", 30) + self.sut_node.send_expect("dmesg -C", "# ", 30) cmd_str = cc.get_dpdk_app_cmd_str( self._app_path, eal_opt_str + " --log-level=6 -a %s" % w[0] ) - self.dut.send_expect(cmd_str, "RTE>>", 30) + self.sut_node.send_expect(cmd_str, "RTE>>", 30) out = "" try: - out = self.dut.send_expect(testsuite, "RTE>>", timeout) - self.dut.send_expect("quit", "# ", 30) + out = self.sut_node.send_expect(testsuite, "RTE>>", timeout) + self.sut_node.send_expect("quit", "# ", 30) except Exception as ex: self.logger.error("Cryptodev Unit Tests Exception") - dmesg = 
self.dut.alt_session.send_expect("dmesg", "# ", 30) + dmesg = self.sut_node.alt_session.send_expect("dmesg", "# ", 30) self.logger.error("dmesg info:") self.logger.error(dmesg) diff --git a/tests/TestSuite_unit_tests_dump.py b/tests/TestSuite_unit_tests_dump.py index 1f00fc8a..b60ba00d 100644 --- a/tests/TestSuite_unit_tests_dump.py +++ b/tests/TestSuite_unit_tests_dump.py @@ -34,9 +34,9 @@ class TestUnitTestsDump(TestCase): Nothing to do here. """ # Based on h/w type, choose how many ports to use - self.cores = self.dut.get_core_list("all") - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.cores = self.sut_node.get_core_list("all") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.start_test_time = 60 self.run_cmd_time = 60 @@ -51,25 +51,25 @@ class TestUnitTestsDump(TestCase): """ Run history log dump test case. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect( + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect( app_name + eal_params, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("dump_log_history", "RTE>>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("dump_log_history", "RTE>>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") self.verify("EAL" in out, "Test failed") def test_ring_dump(self): """ Run history log dump test case. 
""" - eal_params = self.dut.create_eal_parameters(cores="1S/4C/1T") - cmd = self.dut.apps_name["test-pmd"] + eal_params + "-- -i" + eal_params = self.sut_node.create_eal_parameters(cores="1S/4C/1T") + cmd = self.sut_node.apps_name["test-pmd"] + eal_params + "-- -i" - self.dut.send_expect("%s" % cmd, "testpmd>", self.start_test_time) - out = self.dut.send_expect("dump_ring", "testpmd>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("%s" % cmd, "testpmd>", self.start_test_time) + out = self.sut_node.send_expect("dump_ring", "testpmd>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") match_regex = "ring <(.*?)>" m = re.compile(r"%s" % match_regex, re.S) results = m.findall(out) @@ -78,47 +78,47 @@ class TestUnitTestsDump(TestCase): # Only check the last one to make sure ring_dump function work. self.verify("MP_mb_pool_0" in results, "dump ring name failed") for result in results: - self.dut.send_expect("%s" % cmd, "testpmd>", self.start_test_time) - out = self.dut.send_expect( + self.sut_node.send_expect("%s" % cmd, "testpmd>", self.start_test_time) + out = self.sut_node.send_expect( "dump_ring %s" % result, "testpmd>", self.run_cmd_time ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("capacity" in out, "dump ring name failed") def test_mempool_dump(self): """ Run mempool dump test case. 
""" - eal_params = self.dut.create_eal_parameters(cores="1S/4C/1T") - cmd = self.dut.apps_name["test-pmd"] + eal_params + "-- -i" + eal_params = self.sut_node.create_eal_parameters(cores="1S/4C/1T") + cmd = self.sut_node.apps_name["test-pmd"] + eal_params + "-- -i" - self.dut.send_expect("%s" % cmd, "testpmd>", self.start_test_time) - out = self.dut.send_expect("dump_mempool", "testpmd>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("%s" % cmd, "testpmd>", self.start_test_time) + out = self.sut_node.send_expect("dump_mempool", "testpmd>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") match_regex = "mempool <(.*?)>@0x(.*?)\r\n" m = re.compile(r"%s" % match_regex, re.S) results = m.findall(out) self.verify(results[0][0] == "mb_pool_0", "dump mempool name failed") for result in results: - self.dut.send_expect("%s" % cmd, "testpmd>", self.start_test_time) - out = self.dut.send_expect( + self.sut_node.send_expect("%s" % cmd, "testpmd>", self.start_test_time) + out = self.sut_node.send_expect( "dump_mempool %s" % result[0], "testpmd>", self.run_cmd_time * 2 ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("internal cache infos:" in out, "dump mempool name failed") def test_physmem_dump(self): """ Run physical memory dump test case. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect( + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect( app_name + eal_params, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("dump_physmem", "RTE>>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("dump_physmem", "RTE>>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") elements = [ "Segment", "IOVA", @@ -145,12 +145,12 @@ class TestUnitTestsDump(TestCase): """ Run memzone dump test case. """ - eal_params = self.dut.create_eal_parameters(cores="1S/4C/1T") - cmd = self.dut.apps_name["test-pmd"] + eal_params + "-- -i" + eal_params = self.sut_node.create_eal_parameters(cores="1S/4C/1T") + cmd = self.sut_node.apps_name["test-pmd"] + eal_params + "-- -i" - self.dut.send_expect("%s" % cmd, "testpmd>", self.start_test_time) - out = self.dut.send_expect("dump_memzone", "testpmd>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("%s" % cmd, "testpmd>", self.start_test_time) + out = self.sut_node.send_expect("dump_memzone", "testpmd>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") elements = ["Zone", "name", "len", "virt", "socket_id", "flags"] match_regex = "Zone (\d):" @@ -169,13 +169,13 @@ class TestUnitTestsDump(TestCase): """ Run struct size dump test case. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect( + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect( app_name + eal_params, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("dump_struct_sizes", "RTE>>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("dump_struct_sizes", "RTE>>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") elements = ["struct rte_mbuf", "struct rte_mempool", "struct rte_ring"] match_regex = "" @@ -191,29 +191,29 @@ class TestUnitTestsDump(TestCase): """ Run devargs dump test case. """ - test_port = self.dut_ports[0] - pci_address = self.dut.ports_info[test_port]["pci"] - eal_params = self.dut.create_eal_parameters( + test_port = self.sut_ports[0] + pci_address = self.sut_node.ports_info[test_port]["pci"] + eal_params = self.sut_node.create_eal_parameters( cores=self.cores, b_ports=[pci_address] ) - app_name = self.dut.apps_name["test"] - self.dut.send_expect( + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect( app_name + eal_params, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("dump_devargs", "RTE>>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("dump_devargs", "RTE>>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") block_str = " %s" % pci_address self.verify(block_str in out, "Dump block list failed") - eal_params1 = self.dut.create_eal_parameters( + eal_params1 = self.sut_node.create_eal_parameters( cores=self.cores, ports=[pci_address] ) - app_name = self.dut.apps_name["test"] - self.dut.send_expect( + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect( app_name + eal_params1, "R.*T.*E.*>.*>", self.start_test_time ) - out = 
self.dut.send_expect("dump_devargs", "RTE>>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("dump_devargs", "RTE>>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") allow_str = "[pci]: %s" % pci_address self.verify(allow_str in out, "Dump allow list failed") @@ -222,13 +222,13 @@ class TestUnitTestsDump(TestCase): """ Run dump malloc dump test case. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect( + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect( app_name + eal_params, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("dump_malloc_stats", "RTE>>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("dump_malloc_stats", "RTE>>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") match_regex = "Heap id:(\d*)" m = re.compile(r"%s" % match_regex, re.DOTALL) results = m.findall(out) @@ -241,13 +241,13 @@ class TestUnitTestsDump(TestCase): """ Run malloc heaps dump test case. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect( + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect( app_name + eal_params, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("dump_malloc_heaps", "RTE>>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("dump_malloc_heaps", "RTE>>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") elements = ["Heap id", "Heap size", "Heap alloc count"] match_regex = "" @@ -264,13 +264,13 @@ class TestUnitTestsDump(TestCase): """ Run log types dump test case. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect( + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect( app_name + eal_params, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("dump_log_types", "RTE>>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("dump_log_types", "RTE>>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") elements = ["id"] match_regex = "id (\d):" @@ -287,7 +287,7 @@ class TestUnitTestsDump(TestCase): Run after each test case. Stop application test after every case. """ - self.dut.kill_all() + self.sut_node.kill_all() pass def tear_down_all(self): diff --git a/tests/TestSuite_unit_tests_eal.py b/tests/TestSuite_unit_tests_eal.py index c1de4884..ebf74b2d 100644 --- a/tests/TestSuite_unit_tests_eal.py +++ b/tests/TestSuite_unit_tests_eal.py @@ -35,8 +35,8 @@ class TestUnitTestsEal(TestCase): self.start_test_time = 60 self.run_cmd_time = 180 default_cores = "1S/4C/1T" - eal_params = self.dut.create_eal_parameters(cores=default_cores) - app_name = self.dut.apps_name["test"] + eal_params = self.sut_node.create_eal_parameters(cores=default_cores) + app_name = self.sut_node.apps_name["test"] self.test_app_cmdline = app_name + eal_params def set_up(self): @@ -50,13 +50,13 @@ class TestUnitTestsEal(TestCase): Run version autotest. 
""" - self.dut.send_expect( - self.dut.taskset(1) + " " + self.test_app_cmdline, + self.sut_node.send_expect( + self.sut_node.taskset(1) + " " + self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time, ) - out = self.dut.send_expect("version_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("version_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_memcopy(self): @@ -64,13 +64,13 @@ class TestUnitTestsEal(TestCase): Run memcopy autotest. """ - self.dut.send_expect( - self.dut.taskset(1) + " " + self.test_app_cmdline, + self.sut_node.send_expect( + self.sut_node.taskset(1) + " " + self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time, ) - out = self.dut.send_expect("memcpy_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("memcpy_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_common(self): @@ -78,13 +78,13 @@ class TestUnitTestsEal(TestCase): Run common autotest. """ - self.dut.send_expect( - self.dut.taskset(1) + " " + self.test_app_cmdline, + self.sut_node.send_expect( + self.sut_node.taskset(1) + " " + self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time, ) - out = self.dut.send_expect("common_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("common_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_eal_fs(self): @@ -92,13 +92,13 @@ class TestUnitTestsEal(TestCase): Run memcopy autotest. 
""" - self.dut.send_expect( - self.dut.taskset(1) + " " + self.test_app_cmdline, + self.sut_node.send_expect( + self.sut_node.taskset(1) + " " + self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time, ) - out = self.dut.send_expect("eal_fs_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("eal_fs_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_memcpy(self): @@ -106,28 +106,28 @@ class TestUnitTestsEal(TestCase): Run memcopy autotest. """ - self.dut.send_expect( - self.dut.taskset(1) + " " + self.test_app_cmdline, + self.sut_node.send_expect( + self.sut_node.taskset(1) + " " + self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time, ) - out = self.dut.send_expect("memcpy_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("memcpy_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_memcpy_perf(self): """ Run memcopy performance autotest. """ - self.dut.send_expect( - self.dut.taskset(1) + " " + self.test_app_cmdline, + self.sut_node.send_expect( + self.sut_node.taskset(1) + " " + self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time, ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "memcpy_perf_autotest", "RTE>>", self.run_cmd_time * 15 ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_hash(self): @@ -135,11 +135,11 @@ class TestUnitTestsEal(TestCase): Run hash autotest. 
""" - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("hash_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("hash_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") # Extendable Bucket Table enhances and guarantees insertion of 100% of # the keys for a given hash table size. Add the check that average @@ -156,13 +156,13 @@ class TestUnitTestsEal(TestCase): Run has performance autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "hash_perf_autotest", "RTE>>", self.run_cmd_time * 10 ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_hash_functions(self): @@ -170,13 +170,13 @@ class TestUnitTestsEal(TestCase): Run hash functions autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "hash_functions_autotest", "RTE>>", self.run_cmd_time ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_hash_multiwriter(self): @@ -184,13 +184,13 @@ class TestUnitTestsEal(TestCase): Run hash multiwriter autotest. 
""" - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "hash_multiwriter_autotest", "RTE>>", self.run_cmd_time ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_hash_readwrite(self): @@ -198,13 +198,13 @@ class TestUnitTestsEal(TestCase): Run hash readwrite autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "hash_readwrite_func_autotest", "RTE>>", self.run_cmd_time ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_hash_readwrite_lf(self): @@ -212,14 +212,14 @@ class TestUnitTestsEal(TestCase): Run hash readwrite_lf autotest. """ - eal_params = self.dut.create_eal_parameters() - self.dut.send_expect( + eal_params = self.sut_node.create_eal_parameters() + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "hash_readwrite_lf_perf_autotest", "RTE>>", self.run_cmd_time * 3 ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_hash_readwrite_perf(self): @@ -227,14 +227,14 @@ class TestUnitTestsEal(TestCase): Run hash readwrite perf autotest. 
""" - eal_params = self.dut.create_eal_parameters(cores="1S/4C/1T") - self.dut.send_expect( + eal_params = self.sut_node.create_eal_parameters(cores="1S/4C/1T") + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "hash_readwrite_perf_autotest", "RTE>>", self.run_cmd_time * 3 ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_func_reentrancy(self): @@ -242,17 +242,17 @@ class TestUnitTestsEal(TestCase): Run function reentrancy autotest. """ - if self.dut.architecture == "x86_64": + if self.sut_node.architecture == "x86_64": cmdline = self.test_app_cmdline else: # mask cores only on socket 0 - app_name = self.dut.apps_name["test"] - cmdline = self.dut.taskset(1) + " " + app_name + " -n 1 -c 5" - self.dut.send_expect(cmdline, "R.*T.*E.*>.*>", self.start_test_time) - out = self.dut.send_expect( + app_name = self.sut_node.apps_name["test"] + cmdline = self.sut_node.taskset(1) + " " + app_name + " -n 1 -c 5" + self.sut_node.send_expect(cmdline, "R.*T.*E.*>.*>", self.start_test_time) + out = self.sut_node.send_expect( "func_reentrancy_autotest", "RTE>>", self.run_cmd_time ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_atomic(self): @@ -260,11 +260,11 @@ class TestUnitTestsEal(TestCase): Run atomic autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("atomic_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("atomic_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_memory(self): @@ -272,26 +272,26 @@ class TestUnitTestsEal(TestCase): Run memory autotest. 
""" - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("memory_autotest", "RTE>>", self.run_cmd_time * 5) + out = self.sut_node.send_expect("memory_autotest", "RTE>>", self.run_cmd_time * 5) regexp = "IOVA:0x[0-9a-f]*, len:([0-9a-f]*), virt:0x[0-9a-f]*, socket_id:[0-9]*" match = utils.regexp(out, regexp) size = int(match, 16) self.verify(size > 0, "bad size") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def test_lcore_launch(self): """ Run lcore autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("per_lcore_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("per_lcore_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_spinlock(self): @@ -299,11 +299,11 @@ class TestUnitTestsEal(TestCase): Run spinlock autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("spinlock_autotest", "RTE>>", self.run_cmd_time * 2) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("spinlock_autotest", "RTE>>", self.run_cmd_time * 2) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_prefetch(self): @@ -311,11 +311,11 @@ class TestUnitTestsEal(TestCase): Run prefetch autotest. 
""" - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("prefetch_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("prefetch_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_byteorder(self): @@ -323,11 +323,11 @@ class TestUnitTestsEal(TestCase): Run byte order autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("byteorder_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("byteorder_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_logs(self): @@ -335,11 +335,11 @@ class TestUnitTestsEal(TestCase): Run logs autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("logs_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("logs_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_memzone(self): @@ -347,11 +347,11 @@ class TestUnitTestsEal(TestCase): Run memzone autotest. 
""" - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("memzone_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("memzone_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_debug(self): @@ -359,11 +359,11 @@ class TestUnitTestsEal(TestCase): Run debug autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("debug_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("debug_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_cpuflags(self): @@ -371,11 +371,11 @@ class TestUnitTestsEal(TestCase): Run CPU flags autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("cpuflags_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("cpuflags_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_errno(self): @@ -383,13 +383,13 @@ class TestUnitTestsEal(TestCase): Run errno autotest. 
""" - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*TE>>|RT.*E>>|RTE.*>>|RTE>.*>", self.start_test_time, ) - out = self.dut.send_expect("errno_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("errno_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_interrupts(self): @@ -398,31 +398,31 @@ class TestUnitTestsEal(TestCase): """ self.verify(self.env == "linuxapp", "Interrupt only supported in linux env") - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*TE>>|RT.*E>>|RTE.*>>|RTE>.*>", self.start_test_time, ) - out = self.dut.send_expect("interrupt_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("interrupt_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_multiprocess(self): """ Run multiprocess autotest. """ - if self.dut.get_os_type() == "freebsd": - self.dut.send_expect( + if self.sut_node.get_os_type() == "freebsd": + self.sut_node.send_expect( self.test_app_cmdline + " -m 64 --base-virtaddr=0x1000000000", "R.*T.*E.*>.*>", self.start_test_time, ) else: - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline + " -m 64", "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("multiprocess_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("multiprocess_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_string(self): @@ -430,11 +430,11 @@ class TestUnitTestsEal(TestCase): Run string autotest. 
""" - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("string_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("string_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_tailq(self): @@ -442,11 +442,11 @@ class TestUnitTestsEal(TestCase): Run tailq autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("tailq_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("tailq_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_kvargs(self): @@ -454,29 +454,29 @@ class TestUnitTestsEal(TestCase): Run kvargs autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("kvargs_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("kvargs_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_acl(self): """ Run acl autotest. 
""" - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( other_eal_param="force-max-simd-bitwidth" ) - app_name = self.dut.apps_name["test"] + app_name = self.sut_node.apps_name["test"] test_app_cmdline = app_name + eal_params test_app_cmdline += "--no-pci" - if self.dut.dpdk_version >= "20.11.0": + if self.sut_node.dpdk_version >= "20.11.0": test_app_cmdline += " --force-max-simd-bitwidth=0" - self.dut.send_expect(test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time) - out = self.dut.send_expect("acl_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect(test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time) + out = self.sut_node.send_expect("acl_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_link_bonding(self): @@ -484,29 +484,29 @@ class TestUnitTestsEal(TestCase): Run acl autotest. """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect("link_bonding_autotest", "RTE>>", self.run_cmd_time) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("link_bonding_autotest", "RTE>>", self.run_cmd_time) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_link_bonding_rssconf(self): """ """ - self.dut.send_expect( + self.sut_node.send_expect( self.test_app_cmdline, "R.*T.*E.*>.*>", self.start_test_time ) - out = self.dut.send_expect( + out = self.sut_node.send_expect( "link_bonding_rssconf_autotest", "RTE>>", self.run_cmd_time ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def tear_down(self): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_unit_tests_event_timer.py b/tests/TestSuite_unit_tests_event_timer.py index f48d8e4e..44c112e3 100644 --- a/tests/TestSuite_unit_tests_event_timer.py +++ b/tests/TestSuite_unit_tests_event_timer.py @@ -19,25 +19,25 @@ class TestUnitTestEventTimer(TestCase): PMD prerequisites. """ - cores = self.dut.get_core_list("all") + cores = self.sut_node.get_core_list("all") self.coremask = utils.create_mask(cores) # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports() - self.app_name = self.dut.apps_name["test"] + self.sut_ports = self.sut_node.get_ports() + self.app_name = self.sut_node.apps_name["test"] if self.nic == "cavium_a063" or self.nic == "cavium_a064": self.eventdev_device_bus_id = "0002:0e:00.0" self.eventdev_device_id = "a0f9" #### Bind evendev device #### - self.dut.bind_eventdev_port(port_to_bind=self.eventdev_device_bus_id) + self.sut_node.bind_eventdev_port(port_to_bind=self.eventdev_device_bus_id) #### Configuring evendev SS0 & SSOw limits #### - self.dut.set_eventdev_port_limits( + self.sut_node.set_eventdev_port_limits( self.eventdev_device_id, self.eventdev_device_bus_id ) elif self.nic == "cavium_a034": self.eventdev_timer_device_bus_id = "0000:0a:01.0" - self.dut.bind_eventdev_port(port_to_bind=self.eventdev_timer_device_bus_id) + self.sut_node.bind_eventdev_port(port_to_bind=self.eventdev_timer_device_bus_id) def set_up(self): """ @@ -51,21 +51,21 @@ class TestUnitTestEventTimer(TestCase): """ if self.nic == "cavium_a063" or self.nic == "cavium_a064": - self.dut.send_expect( + self.sut_node.send_expect( "./%s -n 1 -c %s -a %s,single_ws=1,tim_stats_ena=1" % (self.app_name, self.coremask, self.eventdev_device_bus_id), "R.*T.*E.*>.*>", 60, ) elif self.nic == "cavium_a034": - self.dut.send_expect( + self.sut_node.send_expect( "./%s -n 1 -c %s -a %s,timvf_stats=1" % (self.app_name, self.coremask, 
self.eventdev_timer_device_bus_id), "R.*T.*E.*>.*>", 60, ) - out = self.dut.send_expect("event_timer_adapter_test", "RTE>>", 300) - self.dut.send_expect("quit", "# ") + out = self.sut_node.send_expect("event_timer_adapter_test", "RTE>>", 300) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") return "SUCCESS" @@ -80,9 +80,9 @@ class TestUnitTestEventTimer(TestCase): Run after each test suite. """ if self.nic == "cavium_a063" or self.nic == "cavium_a064": - self.dut.unbind_eventdev_port(port_to_unbind=self.eventdev_device_bus_id) + self.sut_node.unbind_eventdev_port(port_to_unbind=self.eventdev_device_bus_id) elif self.nic == "cavium_a034": - self.dut.unbind_eventdev_port( + self.sut_node.unbind_eventdev_port( port_to_unbind=self.eventdev_timer_device_bus_id ) - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_unit_tests_kni.py b/tests/TestSuite_unit_tests_kni.py index 65515c21..9a554ea4 100644 --- a/tests/TestSuite_unit_tests_kni.py +++ b/tests/TestSuite_unit_tests_kni.py @@ -26,12 +26,12 @@ class TestUnitTestsKni(TestCase): def insmod_kni(self): - out = self.dut.send_expect("lsmod | grep rte_kni", "# ") + out = self.sut_node.send_expect("lsmod | grep rte_kni", "# ") if "rte_kni" in out: - self.dut.send_expect("rmmod rte_kni.ko", "# ") + self.sut_node.send_expect("rmmod rte_kni.ko", "# ") - out = self.dut.send_expect( + out = self.sut_node.send_expect( "insmod ./%s/kmod/rte_kni.ko lo_mode=lo_mode_fifo" % (self.target), "# " ) @@ -48,7 +48,7 @@ class TestUnitTestsKni(TestCase): KNI Prerequisites """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") self.insmod_kni() def set_up(self): @@ -61,11 +61,11 @@ class TestUnitTestsKni(TestCase): """ Run kni autotest. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("kni_autotest", "RTE>>", 60) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("kni_autotest", "RTE>>", 60) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test Failed") @@ -79,4 +79,4 @@ class TestUnitTestsKni(TestCase): """ Run after each test suite. """ - self.dut.send_expect("rmmod rte_kni", "# ", 5) + self.sut_node.send_expect("rmmod rte_kni", "# ", 5) diff --git a/tests/TestSuite_unit_tests_loopback.py b/tests/TestSuite_unit_tests_loopback.py index f596e242..4d5ffc36 100644 --- a/tests/TestSuite_unit_tests_loopback.py +++ b/tests/TestSuite_unit_tests_loopback.py @@ -34,11 +34,11 @@ class TestUnitTestsLoopback(TestCase): Power Prerequisites """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.tester_itf = self.tester.get_interface(localPort) - self.cores = self.dut.get_core_list("all") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_itf = self.tg_node.get_interface(localPort) + self.cores = self.sut_node.get_core_list("all") [self.arch, machine, env, toolchain] = self.target.split("-") self.verify( @@ -46,15 +46,15 @@ class TestUnitTestsLoopback(TestCase): "pmd perf request running in x86_64 or arm64", ) self.max_traffic_burst = self.get_max_traffic_burst() - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/#define MAX_TRAFFIC_BURST 
%s/#define MAX_TRAFFIC_BURST 32/' app/test/test_pmd_perf.c" % self.max_traffic_burst, "# ", 30, ) self.tmp_path = "/tmp/test_pmd_perf.c" - self.dut.send_expect("rm -fr %s" % self.tmp_path, "# ") - self.dut.send_expect("cp app/test/test_pmd_perf.c %s" % self.tmp_path, "# ") + self.sut_node.send_expect("rm -fr %s" % self.tmp_path, "# ") + self.sut_node.send_expect("cp app/test/test_pmd_perf.c %s" % self.tmp_path, "# ") def set_up(self): """ @@ -63,7 +63,7 @@ class TestUnitTestsLoopback(TestCase): pass def get_max_traffic_burst(self): - pmd_file = self.dut.send_expect( + pmd_file = self.sut_node.send_expect( "cat app/test/test_pmd_perf.c", "# ", 30, trim_whitespace=False ) result_scanner = r"#define MAX_TRAFFIC_BURST\s+([0-9]+)" @@ -76,85 +76,85 @@ class TestUnitTestsLoopback(TestCase): """ Run pmd stream control mode burst test case. """ - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/lpbk_mode = 0/lpbk_mode = 1/' app/test/test_pmd_perf.c", "# ", 30, ) - self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) - self.tester.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") + self.tg_node.send_expect( "tcpdump -i %s ether[12:2] != '0x88cc' -w ./getPackageByTcpdump.cap 2> /dev/null& " - % self.tester_itf, + % self.tg_itf, "#", ) - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("pmd_perf_autotest", "RTE>>", 120) + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("pmd_perf_autotest", "RTE>>", 120) print(out) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in 
out, "Test failed") - self.tester.send_expect("killall tcpdump", "#") - tester_out = self.tester.send_expect( + self.tg_node.send_expect("killall tcpdump", "#") + tg_out = self.tg_node.send_expect( "tcpdump -nn -e -v -r ./getPackageByTcpdump.cap", "#" ) - self.verify("ethertype" not in tester_out, "Test failed") + self.verify("ethertype" not in tg_out, "Test failed") def test_link_mode(self): """ Run pmd stream control mode burst test case. """ - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/lpbk_mode = 1/lpbk_mode = 0/' app/test/test_pmd_perf.c", "# ", 30, ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e '/check_all_ports_link_status(nb_ports, RTE_PORT_ALL);/a\ sleep(6);' app/test/test_pmd_perf.c", "# ", 30, ) - self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) - self.tester.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf ./getPackageByTcpdump.cap", "#") + self.tg_node.send_expect( "tcpdump -i %s -w ./getPackageByTcpdump.cap 2> /dev/null& " - % self.tester_itf, + % self.tg_itf, "#", ) - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - self.dut.send_command("pmd_perf_autotest", 30) + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + self.sut_node.send_command("pmd_perf_autotest", 30) # There is no packet loopback, so the test is hung. # It needs to kill the process manually. 
- self.dut.kill_all() - self.tester.send_expect("killall tcpdump", "#") - tester_out = self.tester.send_expect( + self.sut_node.kill_all() + self.tg_node.send_expect("killall tcpdump", "#") + tg_out = self.tg_node.send_expect( "tcpdump -nn -e -v -r ./getPackageByTcpdump.cap", "#" ) - self.verify("ethertype IPv4" in tester_out, "Test failed") + self.verify("ethertype IPv4" in tg_out, "Test failed") def tear_down(self): """ Run after each test case. """ - self.dut.send_expect("rm -fr app/test/test_pmd_perf.c", "# ") - self.dut.send_expect("cp %s app/test/test_pmd_perf.c" % self.tmp_path, "# ") - self.dut.kill_all() + self.sut_node.send_expect("rm -fr app/test/test_pmd_perf.c", "# ") + self.sut_node.send_expect("cp %s app/test/test_pmd_perf.c" % self.tmp_path, "# ") + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. """ - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/#define MAX_TRAFFIC_BURST 32/#define MAX_TRAFFIC_BURST %s/' app/test/test_pmd_perf.c" % self.max_traffic_burst, "# ", 30, ) - self.dut.build_install_dpdk(self.target) - self.dut.kill_all() + self.sut_node.build_install_dpdk(self.target) + self.sut_node.kill_all() diff --git a/tests/TestSuite_unit_tests_lpm.py b/tests/TestSuite_unit_tests_lpm.py index f3ad6355..2e6b948e 100644 --- a/tests/TestSuite_unit_tests_lpm.py +++ b/tests/TestSuite_unit_tests_lpm.py @@ -33,7 +33,7 @@ class TestUnitTestsLpmIpv6(TestCase): Qos Prerequisites """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -45,44 +45,44 @@ class TestUnitTestsLpmIpv6(TestCase): """ Run lpm for IPv4 autotest. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("lpm_autotest", "RTE>>", 120) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("lpm_autotest", "RTE>>", 120) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_lpm_ipv6(self): """ Run lpm for IPv6 autotest. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("lpm6_autotest", "RTE>>", 120) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("lpm6_autotest", "RTE>>", 120) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_lpm_perf(self): """ Run lpm for IPv4 performance autotest. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("lpm_perf_autotest", "RTE>>", 600) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("lpm_perf_autotest", "RTE>>", 600) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_lpm_ipv6_perf(self): """ Run lpm for IPv6 performance autotest. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("lpm6_perf_autotest", "RTE>>", 120) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("lpm6_perf_autotest", "RTE>>", 120) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def tear_down(self): diff --git a/tests/TestSuite_unit_tests_mbuf.py b/tests/TestSuite_unit_tests_mbuf.py index c7962abd..a273fa11 100644 --- a/tests/TestSuite_unit_tests_mbuf.py +++ b/tests/TestSuite_unit_tests_mbuf.py @@ -30,7 +30,7 @@ class TestUnitTestsMbuf(TestCase): """ Run at the start of each test suite. """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -43,11 +43,11 @@ class TestUnitTestsMbuf(TestCase): Run mbuf autotest. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("mbuf_autotest", "RTE>>", 180) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("mbuf_autotest", "RTE>>", 180) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def tear_down(self): diff --git a/tests/TestSuite_unit_tests_mempool.py b/tests/TestSuite_unit_tests_mempool.py index 25cefecd..1fc92b3b 100644 --- a/tests/TestSuite_unit_tests_mempool.py +++ b/tests/TestSuite_unit_tests_mempool.py @@ -30,7 +30,7 @@ class TestUnitTestsMempool(TestCase): """ Run at the start of each test suite. """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -43,22 +43,22 @@ class TestUnitTestsMempool(TestCase): Run memory pool autotest. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("mempool_autotest", "RTE>>", 120) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("mempool_autotest", "RTE>>", 120) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_mempool_perf(self): """ Run memory pool performance autotest. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("mempool_perf_autotest", "RTE>>", 4500) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("mempool_perf_autotest", "RTE>>", 4500) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def tear_down(self): diff --git a/tests/TestSuite_unit_tests_pmd_perf.py b/tests/TestSuite_unit_tests_pmd_perf.py index c1c35d27..b5648132 100644 --- a/tests/TestSuite_unit_tests_pmd_perf.py +++ b/tests/TestSuite_unit_tests_pmd_perf.py @@ -32,8 +32,8 @@ class TestUnitTestsPmdPerf(TestCase): Power Prerequisites """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.arch = self.target.split("-")[0] self.verify( self.arch in ["x86_64", "arm64"], @@ -42,8 +42,8 @@ class TestUnitTestsPmdPerf(TestCase): self.burst_ctlmodes = ["poll_before_xmit", "poll_after_xmit"] self.rxtx_modes = ["vector", "scalar", "full", "hybrid"] self.anchors = ["rxtx", "rxonly", "txonly"] - socket_id = self.dut.ports_info[0]["port"].socket - self.cores = self.dut.get_core_list(config="1S/4C/1T", socket=socket_id) + socket_id = self.sut_node.ports_info[0]["port"].socket + self.cores = self.sut_node.get_core_list(config="1S/4C/1T", socket=socket_id) def set_up(self): """ @@ -56,19 +56,19 @@ class TestUnitTestsPmdPerf(TestCase): Run pmd stream control mode burst test case. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores, ports=[0, 1]) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + eal_params = self.sut_node.create_eal_parameters(cores=self.cores, ports=[0, 1]) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) for mode in self.burst_ctlmodes: - self.dut.send_expect("set_rxtx_sc %s" % mode, "RTE>>", 10) - out = self.dut.send_expect("pmd_perf_autotest", "RTE>>", 120) + self.sut_node.send_expect("set_rxtx_sc %s" % mode, "RTE>>", 10) + out = self.sut_node.send_expect("pmd_perf_autotest", "RTE>>", 120) match_regex = "Result: (\d+) cycles per packet" m = re.compile(r"%s" % match_regex, re.S) result = m.search(out) self.verify(result, "Failed to get result") self.logger.info("Mode %s latency is %s" % (mode, result.group(1))) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def test_pmd_continues(self): """ @@ -78,31 +78,31 @@ class TestUnitTestsPmdPerf(TestCase): self.table_header = ["Mode"] self.table_header += self.anchors self.result_table_create(self.table_header) - eal_params = self.dut.create_eal_parameters(cores=self.cores, ports=[0, 1]) + eal_params = self.sut_node.create_eal_parameters(cores=self.cores, ports=[0, 1]) print((self.table_header)) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) for mode in self.rxtx_modes: table_row = [mode] - self.dut.send_expect("set_rxtx_sc continuous", "RTE>>", 10) - self.dut.send_expect("set_rxtx_mode %s" % mode, "RTE>>", 10) + self.sut_node.send_expect("set_rxtx_sc continuous", "RTE>>", 10) + self.sut_node.send_expect("set_rxtx_mode %s" % mode, "RTE>>", 10) for anchor in self.anchors: - self.dut.send_expect("set_rxtx_anchor %s" % anchor, "RTE>>", 10) 
- out = self.dut.send_expect("pmd_perf_autotest", "RTE>>", 120) + self.sut_node.send_expect("set_rxtx_anchor %s" % anchor, "RTE>>", 10) + out = self.sut_node.send_expect("pmd_perf_autotest", "RTE>>", 120) match_regex = "Result: (\d+) cycles per packet" m = re.compile(r"%s" % match_regex, re.S) result = m.search(out) self.verify(result, "Failed to get result") table_row.append(result.group(1)) self.result_table_add(table_row) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.result_table_print() def tear_down(self): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_unit_tests_power.py b/tests/TestSuite_unit_tests_power.py index a3214964..689b8080 100644 --- a/tests/TestSuite_unit_tests_power.py +++ b/tests/TestSuite_unit_tests_power.py @@ -31,7 +31,7 @@ class TestUnitTestsPower(TestCase): Power Prerequisites """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -44,11 +44,11 @@ class TestUnitTestsPower(TestCase): Run power autotest. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("power_autotest", "RTE>>", 60) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("power_autotest", "RTE>>", 60) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_power_cpufreq(self): @@ -57,11 +57,11 @@ class TestUnitTestsPower(TestCase): """ # This acpi driver test case need correct BIOS and Grub settings. 
# otherwise, the power lib initialization will be failed - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("power_cpufreq_autotest", "RTE>>", 60) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("power_cpufreq_autotest", "RTE>>", 60) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_power_caps(self): @@ -70,11 +70,11 @@ class TestUnitTestsPower(TestCase): """ # This acpi driver test case need correct BIOS and Grub settings. # otherwise, the power lib initialization will be failed - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("power_caps_autotest", "RTE>>", 60) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("power_caps_autotest", "RTE>>", 60) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def tear_down(self): diff --git a/tests/TestSuite_unit_tests_qos.py b/tests/TestSuite_unit_tests_qos.py index 2938d7c9..710fb7cd 100644 --- a/tests/TestSuite_unit_tests_qos.py +++ b/tests/TestSuite_unit_tests_qos.py @@ -32,7 +32,7 @@ class TestUnitTestsQos(TestCase): QoS Prerequisites """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -45,11 +45,11 @@ class TestUnitTestsQos(TestCase): Run RED autotest. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 30) - out = self.dut.send_expect("red_autotest", "RTE>>", 180) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 30) + out = self.sut_node.send_expect("red_autotest", "RTE>>", 180) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_meter(self): @@ -57,11 +57,11 @@ class TestUnitTestsQos(TestCase): Run meter autotest. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 30) - out = self.dut.send_expect("meter_autotest", "RTE>>", 5) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 30) + out = self.sut_node.send_expect("meter_autotest", "RTE>>", 5) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_sched(self): @@ -75,11 +75,11 @@ class TestUnitTestsQos(TestCase): "Sched auto_test only support in x86_64 or arm64 ppc_64", ) - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 30) - out = self.dut.send_expect("sched_autotest", "RTE>>", 5) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 30) + out = self.sut_node.send_expect("sched_autotest", "RTE>>", 5) + 
self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def tear_down(self): diff --git a/tests/TestSuite_unit_tests_ring.py b/tests/TestSuite_unit_tests_ring.py index 91c5788c..376a7049 100644 --- a/tests/TestSuite_unit_tests_ring.py +++ b/tests/TestSuite_unit_tests_ring.py @@ -30,7 +30,7 @@ class TestUnitTestsRing(TestCase): """ Run at the start of each test suite. """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -43,10 +43,10 @@ class TestUnitTestsRing(TestCase): Run ring autotest. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("ring_autotest", "RTE>>", 36000) + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("ring_autotest", "RTE>>", 36000) self.verify("Test OK" in out, "Test failed") def test_ring_performance(self): @@ -54,11 +54,11 @@ class TestUnitTestsRing(TestCase): Run ring performance autotest. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("ring_perf_autotest", "RTE>>", 210) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("ring_perf_autotest", "RTE>>", 210) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def tear_down(self): diff --git a/tests/TestSuite_unit_tests_ringpmd.py b/tests/TestSuite_unit_tests_ringpmd.py index 4f4442b9..7c9754f4 100644 --- a/tests/TestSuite_unit_tests_ringpmd.py +++ b/tests/TestSuite_unit_tests_ringpmd.py @@ -31,7 +31,7 @@ class TestUnitTestsRingPmd(TestCase): Run at the start of each test suite. Nothing to do here. """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") def set_up(self): """ @@ -47,20 +47,20 @@ class TestUnitTestsRingPmd(TestCase): dev_str1 = "net_ring0" dev_str2 = "net_ring1" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 10) - out = self.dut.send_expect("ring_pmd_autotest", "RTE>>", 120) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 10) + out = self.sut_node.send_expect("ring_pmd_autotest", "RTE>>", 120) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Default no eth_ring devices Test failed") - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.cores, vdevs=[dev_str1, dev_str2] ) - app_name = 
self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 10) - out = self.dut.send_expect("ring_pmd_autotest", "RTE>>", 120) - self.dut.send_expect("quit", "# ") + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 10) + out = self.sut_node.send_expect("ring_pmd_autotest", "RTE>>", 120) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Two eth_ring devices test failed") def tear_down(self): diff --git a/tests/TestSuite_unit_tests_timer.py b/tests/TestSuite_unit_tests_timer.py index 82d82fa2..09d63dbf 100644 --- a/tests/TestSuite_unit_tests_timer.py +++ b/tests/TestSuite_unit_tests_timer.py @@ -30,7 +30,7 @@ class TestUnitTestsTimer(TestCase): """ Run at the start of each test suite. """ - self.cores = self.dut.get_core_list("all") + self.cores = self.sut_node.get_core_list("all") # # change timeout base number of cores on the system # default 60 secs @@ -54,23 +54,23 @@ class TestUnitTestsTimer(TestCase): """ Run timer autotest. """ - eal_params = self.dut.create_eal_parameters(cores=self.cores) + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) timeout = self.get_nic_timeout() - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", timeout) - out = self.dut.send_expect("timer_autotest", "RTE>>", self.this_timeout) - self.dut.send_expect("quit", "# ") + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", timeout) + out = self.sut_node.send_expect("timer_autotest", "RTE>>", self.this_timeout) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def test_timer_perf(self): """ Run timer autotest. 
""" - eal_params = self.dut.create_eal_parameters(cores=self.cores) - app_name = self.dut.apps_name["test"] - self.dut.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) - out = self.dut.send_expect("timer_perf_autotest", "RTE>>", self.this_timeout) - self.dut.send_expect("quit", "# ") + eal_params = self.sut_node.create_eal_parameters(cores=self.cores) + app_name = self.sut_node.apps_name["test"] + self.sut_node.send_expect(app_name + eal_params, "R.*T.*E.*>.*>", 60) + out = self.sut_node.send_expect("timer_perf_autotest", "RTE>>", self.this_timeout) + self.sut_node.send_expect("quit", "# ") self.verify("Test OK" in out, "Test failed") def tear_down(self): diff --git a/tests/TestSuite_userspace_ethtool.py b/tests/TestSuite_userspace_ethtool.py index adbba999..4e734d88 100644 --- a/tests/TestSuite_userspace_ethtool.py +++ b/tests/TestSuite_userspace_ethtool.py @@ -14,10 +14,10 @@ import time import framework.utils as utils from framework.exception import VerifyFailure -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE, SCAPY2IXIA from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.utils import RED @@ -26,26 +26,26 @@ class TestUserspaceEthtool(TestCase): """ Run at the start of each test suite. 
""" - self.ports = self.dut.get_ports() + self.ports = self.sut_node.get_ports() self.verify(len(self.ports) >= 2, "No ports found for " + self.nic) # build sample app - out = self.dut.build_dpdk_apps("examples/ethtool") + out = self.sut_node.build_dpdk_apps("examples/ethtool") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") - self.app_ethtool_path = self.dut.apps_name["ethtool"] - used_dut_port_pci = self.dut.ports_info[self.ports[0]]["port"].pci - out = self.dut.send_expect( - "cat /sys/bus/pci/devices/%s/numa_node " % used_dut_port_pci, "# " + self.app_ethtool_path = self.sut_node.apps_name["ethtool"] + used_sut_port_pci = self.sut_node.ports_info[self.ports[0]]["port"].pci + out = self.sut_node.send_expect( + "cat /sys/bus/pci/devices/%s/numa_node " % used_sut_port_pci, "# " ) - cpu_cores = self.dut.send_expect( + cpu_cores = self.sut_node.send_expect( 'lscpu |grep "NUMA node%s CPU(s):"' % out, "# " ) core = re.findall(r"\d+-(\d+)", cpu_cores)[0] core = int(core) cores = [core - 1, core - 2, core - 3, core - 4] - eal_para = self.dut.create_eal_parameters(cores=cores) + eal_para = self.sut_node.create_eal_parameters(cores=cores) self.cmd = "%s %s" % (self.app_ethtool_path, eal_para) # pause frame basic configuration @@ -53,7 +53,7 @@ class TestUserspaceEthtool(TestCase): self.frame_size = 64 self.pause_rate = 0.50 # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -62,12 +62,12 @@ class TestUserspaceEthtool(TestCase): pass def build_ethtool(self): - out = self.dut.build_dpdk_apps("examples/ethtool") + out = self.sut_node.build_dpdk_apps("examples/ethtool") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") def strip_portstats(self, portid): - out = self.dut.send_expect("portstats %d " % portid, "EthApp>") + out = 
self.sut_node.send_expect("portstats %d " % portid, "EthApp>") stats_pattern = ( r"portstats (\d)(\s+)Port (\d+) stats(\s+)In: (\d+)" " \((\d+) bytes\)(\s+)Out: (\d+) \((\d+) bytes\)" @@ -81,7 +81,7 @@ class TestUserspaceEthtool(TestCase): return (0, 0) def strip_ringparam(self, portid): - out = self.dut.send_expect("ringparam %d" % portid, "EthApp>") + out = self.sut_node.send_expect("ringparam %d" % portid, "EthApp>") ring_pattern = ( r"ringparam (\d)(\s+)Port (\d+) ring parameters(\s+)" "Rx Pending: (\d+) \((\d+) max\)(\s+)Tx Pending: " @@ -94,7 +94,7 @@ class TestUserspaceEthtool(TestCase): return (0, 0, 0, 0) def strip_mac(self, portid): - out = self.dut.send_expect("macaddr %d" % portid, "EthApp>") + out = self.sut_node.send_expect("macaddr %d" % portid, "EthApp>") mac_pattern = r"macaddr (\d+)(\s+)Port (\d+) MAC Address: (.*)" m = re.match(mac_pattern, out) if m: @@ -104,9 +104,9 @@ class TestUserspaceEthtool(TestCase): def strip_mtu(self, intf): """ - Strip tester port mtu + Strip TG port mtu """ - link_info = self.tester.send_expect("ip link show %s" % intf, "# ") + link_info = self.tg_node.send_expect("ip link show %s" % intf, "# ") mtu_pattern = r".* mtu (\d+) .*" m = re.match(mtu_pattern, link_info) if m: @@ -115,7 +115,7 @@ class TestUserspaceEthtool(TestCase): return 1518 def strip_md5(self, filename): - md5_info = self.dut.send_expect("md5sum %s" % filename, "# ") + md5_info = self.sut_node.send_expect("md5sum %s" % filename, "# ") md5_pattern = r"(\w+) (\w+)" m = re.match(md5_pattern, md5_info) if m: @@ -175,7 +175,7 @@ class TestUserspaceEthtool(TestCase): # get nic driver information using linux's ethtool pattern = "(.*): (.*)" firmwarePat = "0x([0-9a-f]+)" - infos = self.dut.send_expect("ethtool -i %s" % port_name, "# ").splitlines() + infos = self.sut_node.send_expect("ethtool -i %s" % port_name, "# ").splitlines() sys_nic_info = {} for info in infos: if not info: @@ -218,7 +218,7 @@ class TestUserspaceEthtool(TestCase): retries = 0 reg_str = 
"Port\s+{}:\s+(Up|Down)".format(port_id) while retries < 5: - out = self.dut.send_expect("link", "EthApp> ", 10) + out = self.sut_node.send_expect("link", "EthApp> ", 10) if out is not None: status = re.search(reg_str, out).group(1) if status == expected_status: @@ -231,9 +231,9 @@ class TestUserspaceEthtool(TestCase): """ Test ethtool can dump basic information """ - self.dut.send_expect(self.cmd, "EthApp>", 60) - dpdk_driver_msg = self.dut.send_expect("drvinfo", "EthApp>") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect(self.cmd, "EthApp>", 60) + dpdk_driver_msg = self.sut_node.send_expect("drvinfo", "EthApp>") + self.sut_node.send_expect("quit", "# ") dpdk_nic_infos, msg = self.dpdk_get_nic_info(dpdk_driver_msg) self.verify(dpdk_nic_infos, msg) @@ -242,7 +242,7 @@ class TestUserspaceEthtool(TestCase): portsinfo[index] = {} portinfo = portsinfo[index] port = self.ports[index] - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] # strip original driver portinfo["ori_driver"] = netdev.get_nic_driver() portinfo["net_dev"] = netdev @@ -262,7 +262,7 @@ class TestUserspaceEthtool(TestCase): portinfo = portsinfo[index] portinfo["net_dev"].bind_driver(portinfo["ori_driver"]) - self.dut.send_expect(self.cmd, "EthApp>", 60) + self.sut_node.send_expect(self.cmd, "EthApp>", 60) # ethtool doesn't support port disconnect by tools of linux # only detect physical link disconnect status verify_pass = True @@ -270,13 +270,13 @@ class TestUserspaceEthtool(TestCase): if not (self.is_eth_series_nic(700) or self.is_eth_series_nic(800)): # check link status dump function for port in self.ports: - tester_port = self.tester.get_local_port(port) - intf = self.tester.get_interface(tester_port) - self.tester.send_expect("ip link set dev %s down" % intf, "# ") + tg_port = self.tg_node.get_local_port(port) + intf = self.tg_node.get_interface(tg_port) + self.tg_node.send_expect("ip link set dev %s down" % intf, "# ") # wait for link 
stable time.sleep(5) - out = self.dut.send_expect("link", "EthApp>", 60) + out = self.sut_node.send_expect("link", "EthApp>", 60) link_pattern = r"Port (\d+): (.*)" link_infos = out.split("\r\n") for link_info in link_infos: @@ -296,26 +296,26 @@ class TestUserspaceEthtool(TestCase): verify_pass = False for port in self.ports: - tester_port = self.tester.get_local_port(port) - intf = self.tester.get_interface(tester_port) - self.tester.send_expect("ip link set dev %s up" % intf, "# ") + tg_port = self.tg_node.get_local_port(port) + intf = self.tg_node.get_interface(tg_port) + self.tg_node.send_expect("ip link set dev %s up" % intf, "# ") # wait for link stable time.sleep(5) # check port stats function - pkt = Packet(pkt_type="UDP") + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") for port in self.ports: - tester_port = self.tester.get_local_port(port) - intf = self.tester.get_interface(tester_port) + tg_port = self.tg_node.get_local_port(port) + intf = self.tg_node.get_interface(tg_port) ori_rx_pkts, ori_tx_pkts = self.strip_portstats(port) - pkt.send_pkt(self.tester, tx_port=intf, count=4) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=intf, count=4) time.sleep(1) rx_pkts, tx_pkts = self.strip_portstats(port) self.verify( (rx_pkts == (ori_rx_pkts + 4)), "Failed to record Rx/Tx packets" ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") # Check port link down verification result if verify_pass == False: raise VerifyFailure(verify_msg) @@ -324,21 +324,21 @@ class TestUserspaceEthtool(TestCase): """ Test ethtool app can retrieve port register """ - self.dut.send_expect(self.cmd, "EthApp>", 60) + self.sut_node.send_expect(self.cmd, "EthApp>", 60) portsinfo = [] ori_drivers = [] for portid in range(len(self.ports)): - self.dut.send_expect("regs %d regs_%d.bin" % (portid, portid), "EthApp>") + self.sut_node.send_expect("regs %d regs_%d.bin" % (portid, portid), "EthApp>") portinfo = {"portid": portid, "reg_file": "regs_%d.bin" % 
portid} portsinfo.append(portinfo) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") for index in range(len(self.ports)): port = self.ports[index] - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] portinfo = portsinfo[index] # strip original driver portinfo["ori_driver"] = netdev.get_nic_driver() @@ -347,7 +347,7 @@ class TestUserspaceEthtool(TestCase): netdev.bind_driver() # get linux interface intf = netdev.get_interface_name() - out = self.dut.send_expect( + out = self.sut_node.send_expect( "ethtool -d %s raw off file %s" % (intf, portinfo["reg_file"]), "# " ) if "register" not in out or "CTRL" not in out: @@ -364,29 +364,29 @@ class TestUserspaceEthtool(TestCase): Test ethtool app dump eeprom function """ # require md5sum to check file - out = self.dut.send_expect("whereis md5sum", "# ") + out = self.sut_node.send_expect("whereis md5sum", "# ") self.verify( - "/usr/bin/md5sum" in out, "This case required md5sum installed on DUT" + "/usr/bin/md5sum" in out, "This case required md5sum installed on SUT" ) - self.dut.send_expect(self.cmd, "EthApp>", 60) + self.sut_node.send_expect(self.cmd, "EthApp>", 60) portsinfo = [] ori_drivers = [] for portid in range(len(self.ports)): # dump eeprom by userspace ethtool - self.dut.send_expect( + self.sut_node.send_expect( "eeprom %d eeprom_%d.bin" % (portid, portid), "EthApp>" ) portinfo = {"portid": portid, "eeprom_file": "eeprom_%d.bin" % portid} portsinfo.append(portinfo) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") for index in range(len(self.ports)): port = self.ports[index] - netdev = self.dut.ports_info[port]["port"] + netdev = self.sut_node.ports_info[port]["port"] portinfo = portsinfo[index] # strip original driver portinfo["ori_driver"] = netdev.get_nic_driver() @@ -397,16 +397,16 @@ class TestUserspaceEthtool(TestCase): intf = netdev.get_interface_name() ethtool_eeprom = "ethtool_eeprom_%d.bin" % index # dump 
eeprom by linux ethtool - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --eeprom-dump %s raw on > %s" % (intf, ethtool_eeprom), "# " ) # wait for file ready time.sleep(2) # dpdk userspcae tools dump eeprom file size different with kernel ethtool dump dpdk_eeprom_size = int( - self.dut.send_expect("stat -c %%s %s" % portinfo["eeprom_file"], "# ") + self.sut_node.send_expect("stat -c %%s %s" % portinfo["eeprom_file"], "# ") ) - self.dut.send_expect( + self.sut_node.send_expect( "dd if=%s of=%s bs=%d count=1" % ( ethtool_eeprom, @@ -431,11 +431,11 @@ class TestUserspaceEthtool(TestCase): Test ethtool app ring parameter getting and setting """ for index in range(len(self.ports)): - self.dut.send_expect(self.cmd, "EthApp>", 60) + self.sut_node.send_expect(self.cmd, "EthApp>", 60) port = self.ports[index] ori_rx_pkts, ori_tx_pkts = self.strip_portstats(port) _, rx_max, _, tx_max = self.strip_ringparam(index) - self.dut.send_expect( + self.sut_node.send_expect( "ringparam %d %d %d" % (index, tx_max, rx_max), "EthApp>" ) rx_ring, _, tx_ring, _ = self.strip_ringparam(index) @@ -445,77 +445,77 @@ class TestUserspaceEthtool(TestCase): self.verify( tx_ring == tx_max, "Userspace tool failed to set Tx ring parameter" ) - pkt = Packet(pkt_type="UDP") - tester_port = self.tester.get_local_port(port) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + tg_port = self.tg_node.get_local_port(port) self.verify( self.ethapp_check_link_status(index, "Up") == True, "Fail to Open port{}".format(index), ) - intf = self.tester.get_interface(tester_port) - pkt.send_pkt(self.tester, tx_port=intf, count=4) + intf = self.tg_node.get_interface(tg_port) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=intf, count=4) rx_pkts, tx_pkts = self.strip_portstats(index) self.verify( rx_pkts == ori_rx_pkts + 4, "Failed to forward after ring parameter changed", ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def test_mac_address(self): """ Test ethtool 
app mac function """ valid_mac = "00:10:00:00:00:00" - self.dut.send_expect(self.cmd, "EthApp>", 60) + self.sut_node.send_expect(self.cmd, "EthApp>", 60) for index in range(len(self.ports)): port = self.ports[index] - mac = self.dut.ports_info[port]["mac"] + mac = self.sut_node.ports_info[port]["mac"] dump_mac = self.strip_mac(index) self.verify( mac.lower() == dump_mac.lower(), "Userspace tool failed to dump mac" ) - self.dut.send_expect("macaddr %d %s" % (port, valid_mac), "EthApp>") + self.sut_node.send_expect("macaddr %d %s" % (port, valid_mac), "EthApp>") dump_mac = self.strip_mac(index) self.verify(dump_mac == valid_mac, "Userspace tool failed to set mac") # check forwarded mac has been changed - pkt = Packet(pkt_type="UDP") - tester_port = self.tester.get_local_port(port) - intf = self.tester.get_interface(tester_port) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + tg_port = self.tg_node.get_local_port(port) + intf = self.tg_node.get_interface(tg_port) self.verify( self.ethapp_check_link_status(index, "Up") == True, "Fail to Open port{}".format(index), ) # send and sniff packet - inst = self.tester.tcpdump_sniff_packets(intf) - pkt.send_pkt(self.tester, tx_port=intf, count=4) - pkts = self.tester.load_tcpdump_sniff_packets(inst, timeout=3) + inst = self.tg_node.tcpdump_sniff_packets(intf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=intf, count=4) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst, timeout=3) self.verify(len(pkts) == 4, "Packet not forwarded as expected") src_mac = pkts.strip_layer_element("layer2", "src", p_index=0) self.verify(src_mac == valid_mac, "Forwarded packet not match default mac") # check multicast will not be valid mac invalid_mac = "01:00:00:00:00:00" - out = self.dut.send_expect("validate %s" % invalid_mac, "EthApp>") + out = self.sut_node.send_expect("validate %s" % invalid_mac, "EthApp>") self.verify("not unicast" in out, "Failed to detect incorrect unicast mac") invalid_mac = "00:00:00:00:00:00" - out = 
self.dut.send_expect("validate %s" % invalid_mac, "EthApp>") + out = self.sut_node.send_expect("validate %s" % invalid_mac, "EthApp>") self.verify("not unicast" in out, "Failed to detect incorrect unicast mac") - out = self.dut.send_expect("validate %s" % valid_mac, "EthApp>") + out = self.sut_node.send_expect("validate %s" % valid_mac, "EthApp>") self.verify("is unicast" in out, "Failed to detect correct unicast mac") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def test_port_config(self): """ Test ethtool app port configure """ - self.dut.send_expect(self.cmd, "EthApp>", 60) + self.sut_node.send_expect(self.cmd, "EthApp>", 60) for index in range(len(self.ports)): port = self.ports[index] ori_rx_pkts, _ = self.strip_portstats(index) # add sleep time for update link status with Intel® Ethernet 700 Series nic time.sleep(10) # stop port - self.dut.send_expect("stop %d" % index, "EthApp>") + self.sut_node.send_expect("stop %d" % index, "EthApp>") # about ICE_25G-E810C_SFP(8086:1593),there have a kernel driver link status issue # about IXGBE_10G-X550T(8086:1563),driver do not write register to set link-down # so skip this step of verify status @@ -529,32 +529,32 @@ class TestUserspaceEthtool(TestCase): "Fail to stop port{}".format(index), ) # check packet not forwarded when port is stop - pkt = Packet(pkt_type="UDP") - tester_port = self.tester.get_local_port(port) - intf = self.tester.get_interface(tester_port) - pkt.send_pkt(self.tester, tx_port=intf, count=4) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + tg_port = self.tg_node.get_local_port(port) + intf = self.tg_node.get_interface(tg_port) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=intf, count=4) rx_pkts, tx_pkts = self.strip_portstats(index) self.verify(rx_pkts == ori_rx_pkts, "Failed to stop port") # restart port and check packet can normally forwarded - self.dut.send_expect("open %d" % index, "EthApp>") + self.sut_node.send_expect("open %d" % index, 
"EthApp>") self.verify( self.ethapp_check_link_status(index, "Up") == True, "Fail to Open port{}".format(index), ) # wait few time for port ready rx_pkts, tx_pkts = self.strip_portstats(index) - pkt.send_pkt(self.tester, tx_port=intf, count=4) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=intf, count=4) rx_pkts_open, tx_pkts_open = self.strip_portstats(index) self.verify(rx_pkts_open == rx_pkts + 4, "Failed to reopen port rx") self.verify(tx_pkts_open == tx_pkts + 4, "Failed to reopen port tx") - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") def test_port_mtu(self): """ Test ethtool app port mtu configure """ - self.dut.send_expect(self.cmd, "EthApp>", 60) + self.sut_node.send_expect(self.cmd, "EthApp>", 60) mtus = [1519, 2048] mtu_threshold = 2022 offset = 0 @@ -567,14 +567,14 @@ class TestUserspaceEthtool(TestCase): for index in range(len(self.ports)): port = self.ports[index] # change mtu - tester_port = self.tester.get_local_port(port) - intf = self.tester.get_interface(tester_port) + tg_port = self.tg_node.get_local_port(port) + intf = self.tg_node.get_interface(tg_port) ori_mtu = self.strip_mtu(intf) - self.tester.send_expect("ifconfig %s mtu 9000" % (intf), "# ") + self.tg_node.send_expect("ifconfig %s mtu 9000" % (intf), "# ") for mtu in mtus: # Intel® Ethernet 800 Series should stop port before set mtu if self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"]: - self.dut.send_expect("stop %s" % index, "EthApp>") + self.sut_node.send_expect("stop %s" % index, "EthApp>") # The mtu threshold is 2022,When it is greater than 2022, the open/stop port is required. 
if mtu > mtu_threshold: @@ -584,41 +584,41 @@ class TestUserspaceEthtool(TestCase): "IGC-I225_LM", ]: mtu = mtu_threshold - self.dut.send_expect("stop %s" % index, "EthApp>") - self.dut.send_expect("mtu %d %d" % (index, mtu), "EthApp>") - self.dut.send_expect("open %s" % index, "EthApp>") - self.dut.send_expect("mtu %d %d" % (index, mtu), "EthApp>") + self.sut_node.send_expect("stop %s" % index, "EthApp>") + self.sut_node.send_expect("mtu %d %d" % (index, mtu), "EthApp>") + self.sut_node.send_expect("open %s" % index, "EthApp>") + self.sut_node.send_expect("mtu %d %d" % (index, mtu), "EthApp>") if self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"]: - self.dut.send_expect("open %s" % index, "EthApp>") + self.sut_node.send_expect("open %s" % index, "EthApp>") time.sleep(5) ori_rx_pkts, _ = self.strip_portstats(index) pkt_size = mtu + HEADER_SIZE["eth"] + offset - pkt = Packet(pkt_type="UDP", pkt_len=pkt_size) - pkt.send_pkt(self.tester, tx_port=intf, count=4) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=pkt_size) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=intf, count=4) rx_pkts, _ = self.strip_portstats(index) self.verify( rx_pkts == ori_rx_pkts + 4, "Packet match mtu not forwarded as expected", ) if self.nic in ["cavium_a063", "cavium_a064"]: - pkt = Packet( + scapy_pkt_builder = ScapyPacketBuilder( pkt_type="UDP", pkt_len=mtu + 9 + HEADER_SIZE["eth"] + offset ) else: - pkt = Packet( + scapy_pkt_builder = ScapyPacketBuilder( pkt_type="UDP", pkt_len=mtu + 1 + HEADER_SIZE["eth"] + offset ) - pkt.send_pkt(self.tester, tx_port=intf, count=4) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=intf, count=4) rx_pkts_over, _ = self.strip_portstats(index) self.verify( rx_pkts == rx_pkts_over, "Packet over mtu should not be forwarded" ) - self.tester.send_expect("ifconfig %s mtu %d" % (intf, ori_mtu), "# ") + self.tg_node.send_expect("ifconfig %s mtu %d" % (intf, ori_mtu), "# ") - self.dut.send_expect("quit", "# ") + 
self.sut_node.send_expect("quit", "# ") def test_perf_port_tx_pause(self): """ @@ -626,41 +626,41 @@ class TestUserspaceEthtool(TestCase): """ # sleep a while when receive packets main_file = "examples/ethtool/ethtool-app/main.c" - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e '/if (cnt_recv_frames > 0) {$/i\\usleep(10);' %s" % main_file, "# ", ) # build sample app self.build_ethtool() - self.dut.send_expect(self.cmd, "EthApp>", 60) + self.sut_node.send_expect(self.cmd, "EthApp>", 60) # enable pause tx - self.dut.send_expect("pause 0 tx", "EthApp") + self.sut_node.send_expect("pause 0 tx", "EthApp") tgen_input = [] headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["udp"] payload_size = self.frame_size - headers_size - dst_mac = self.dut.get_mac_address(0) - self.tester.scapy_append( + dst_mac = self.sut_node.get_mac_address(0) + self.tg_node.scapy_append( 'wrpcap("/root/pause_tx.pcap", [Ether(dst="%s")/IP()/UDP()/("X"*%d)])' % (dst_mac, payload_size) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() # rx and tx is the same port - tester_port = self.tester.get_local_port(self.ports[0]) - tgen_input.append((tester_port, tester_port, "/root/pause_tx.pcap")) + tg_port = self.tg_node.get_local_port(self.ports[0]) + tgen_input.append((tg_port, tg_port, "/root/pause_tx.pcap")) # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, None, self.tester.pktgen + tgen_input, 100, None, self.tg_node.perf_tg ) traffic_opt = {"throughput_stat_flag": True} - loss, rx_throughput = self.tester.pktgen._measure_loss( + loss, rx_throughput = self.tg_node.perf_tg._measure_loss( stream_ids=streams, options=traffic_opt ) tx_pps = rx_throughput[1] sent_pkts, recv_pkts = list(loss.values())[0][1:] - self.dut.send_expect("quit", "# ") - self.dut.send_expect("sed -i -e '/usleep(10);$/d' %s" % main_file, "# ") + self.sut_node.send_expect("quit", "# ") + self.sut_node.send_expect("sed -i -e 
'/usleep(10);$/d' %s" % main_file, "# ") # rebuild sample app self.build_ethtool() # verify ixia transmit line rate dropped @@ -677,12 +677,12 @@ class TestUserspaceEthtool(TestCase): """ Run after each test case. """ - self.dut.bind_interfaces_linux(self.drivername) - self.dut.kill_all() + self.sut_node.bind_interfaces_linux(self.drivername) + self.sut_node.kill_all() pass def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_vdev_primary_secondary.py b/tests/TestSuite_vdev_primary_secondary.py index 97e44daa..d13cf600 100644 --- a/tests/TestSuite_vdev_primary_secondary.py +++ b/tests/TestSuite_vdev_primary_secondary.py @@ -27,35 +27,35 @@ class TestVdevPrimarySecondary(TestCase): Run at the start of each test suite. """ self.queues = 2 - self.mem_channels = self.dut.get_memory_channels() - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores = self.dut.get_core_list("all", socket=self.ports_socket) + self.mem_channels = self.sut_node.get_memory_channels() + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores = self.sut_node.get_core_list("all", socket=self.ports_socket) self.vhost_cores = self.cores[0:6] self.verify(len(self.vhost_cores) >= 6, "The machine has too few cores.") - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.app_testpmd_path = self.dut.apps_name["test-pmd"] - self.app_symmetric_mp_path = self.dut.apps_name["symmetric_mp"] - self.app_hotplug_mp_path = self.dut.apps_name["hotplug_mp"] + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] + self.app_symmetric_mp_path = 
self.sut_node.apps_name["symmetric_mp"] + self.app_hotplug_mp_path = self.sut_node.apps_name["hotplug_mp"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] - self.vhost_user = self.dut.create_session("vhost-user") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) + self.vhost_user = self.sut_node.create_session("vhost-user") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) def set_up(self): """ Run before each test case. """ - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") def setup_vm_env(self): """ Create testing environment """ self.virtio_mac = "52:54:00:00:00:0" - self.vm = VM(self.dut, "vm0", "vhost_sample") + self.vm = VM(self.sut_node, "vm0", "vhost_sample") for i in range(self.queues): vm_params = {} vm_params["driver"] = "vhost-user" @@ -69,8 +69,8 @@ class TestVdevPrimarySecondary(TestCase): self.vm.set_vm_device(**vm_params) try: - self.vm_dut = self.vm.start() - if self.vm_dut is None: + self.vm_sut = self.vm.start() + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -135,17 +135,17 @@ class TestVdevPrimarySecondary(TestCase): self.vm_secondary.send_expect("attach {}".format(dev_pci), "example", 120) def prepare_symmetric_mp(self): - out = self.vm_dut.build_dpdk_apps("./examples/multi_process/symmetric_mp") + out = self.vm_sut.build_dpdk_apps("./examples/multi_process/symmetric_mp") self.verify("Error" not in out, "compilation symmetric_mp error") def prepare_hotplug_mp(self): - out = self.vm_dut.build_dpdk_apps("./examples/multi_process/hotplug_mp") + out = 
self.vm_sut.build_dpdk_apps("./examples/multi_process/hotplug_mp") self.verify("Error" not in out, "compilation hotplug_mp error") def close_session(self): - self.vm_dut.close_session(self.vm_primary) - self.vm_dut.close_session(self.vm_secondary) - self.dut.close_session(self.vhost_user) + self.vm_sut.close_session(self.vm_primary) + self.vm_sut.close_session(self.vm_secondary) + self.sut_node.close_session(self.vhost_user) def test_virtio_primary_and_secondary_process(self): vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net0,queues=2,client=1' --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1'" @@ -153,8 +153,8 @@ class TestVdevPrimarySecondary(TestCase): self.launch_testpmd(param=vhost_param, eal_param=vhost_eal_param) self.setup_vm_env() self.prepare_symmetric_mp() - self.vm_primary = self.vm_dut.new_session(suite="vm_primary") - self.vm_secondary = self.vm_dut.new_session(suite="vm_secondary") + self.vm_primary = self.vm_sut.new_session(suite="vm_primary") + self.vm_secondary = self.vm_sut.new_session(suite="vm_secondary") self.launch_symmetric_mp() self.vhost_user_pmd.execute_cmd("set fwd mac") self.vhost_user_pmd.execute_cmd("start tx_first") @@ -173,7 +173,7 @@ class TestVdevPrimarySecondary(TestCase): and len(result_secondary[1]) != 0, "RX no data", ) - self.dut.send_expect("quit", "#", 15) + self.sut_node.send_expect("quit", "#", 15) def test_virtio_primay_and_secondary_process_hotplug(self): vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1' --vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1'" @@ -183,11 +183,11 @@ class TestVdevPrimarySecondary(TestCase): self.vhost_user_pmd.execute_cmd("start") self.setup_vm_env() self.prepare_hotplug_mp() - self.vm_primary = self.vm_dut.new_session(suite="vm_primary") - self.vm_secondary = self.vm_dut.new_session(suite="vm_secondary") + self.vm_primary = self.vm_sut.new_session(suite="vm_primary") + self.vm_secondary = self.vm_sut.new_session(suite="vm_secondary") 
self.launch_hotplug_mp() vm_ports = [] - for pci_info in self.vm_dut.ports_info: + for pci_info in self.vm_sut.ports_info: vm_ports.append(pci_info["pci"]) self.check_etherdev(dev_list=vm_ports) detach_pci = vm_ports[0] @@ -198,17 +198,17 @@ class TestVdevPrimarySecondary(TestCase): self.attach_etherdev_from_secondary(dev_pci=detach_pci) vm_ports.append(detach_pci) self.check_etherdev(dev_list=vm_ports) - self.dut.send_expect("quit", "#", 15) + self.sut_node.send_expect("quit", "#", 15) def tear_down(self): """ Run after each test case. """ - self.vm_dut.kill_all() - self.dut.kill_all() + self.vm_sut.kill_all() + self.sut_node.kill_all() self.vm.stop() - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") time.sleep(2) def tear_down_all(self): diff --git a/tests/TestSuite_veb_switch.py b/tests/TestSuite_veb_switch.py index 77ba9559..cb6ea255 100644 --- a/tests/TestSuite_veb_switch.py +++ b/tests/TestSuite_veb_switch.py @@ -12,14 +12,14 @@ import re import time import framework.utils as utils -from framework.dut import Dut -from framework.packet import Packet from framework.pmd_output import PmdOutput -from framework.project_dpdk import DPDKdut +from framework.project_dpdk import DPDKSut +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE +from framework.sut_node import SutNode from framework.test_case import TestCase from framework.utils import RED -from framework.virt_dut import VirtDut +from framework.virt_sut import VirtSut class TestVEBSwitching(TestCase): @@ -51,7 +51,7 @@ class TestVEBSwitching(TestCase): tx_bytes_prefix = "TX-bytes:" if dev == "first": - out = self.dut.send_expect("show port stats %d" % portid, "testpmd> ") + out = self.sut_node.send_expect("show port stats %d" % 
portid, "testpmd> ") elif dev == "second": out = self.session_secondary.send_expect( "show port stats %d" % portid, "testpmd> " @@ -89,19 +89,19 @@ class TestVEBSwitching(TestCase): """ Send 1 packet """ - self.dut.send_expect("start", "testpmd>") - mac = self.dut.get_mac_address(0) + self.sut_node.send_expect("start", "testpmd>") + mac = self.sut_node.get_mac_address(0) if tran_type == "vlan": - pkt = Packet(pkt_type="VLAN_UDP") - pkt.config_layer("ether", {"dst": vf_mac}) - pkt.config_layer("vlan", {"vlan": 1}) - pkt.send_pkt(self.tester, tx_port=itf) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP") + scapy_pkt_builder.config_layer("ether", {"dst": vf_mac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": 1}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=itf) time.sleep(0.5) else: - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": vf_mac}) - pkt.send_pkt(self.tester, tx_port=itf) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": vf_mac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=itf) time.sleep(0.5) def count_packet(self, out, mac): @@ -147,13 +147,13 @@ class TestVEBSwitching(TestCase): ], "NIC Unsupported: " + str(self.nic), ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.session_secondary = self.dut.new_session() - self.session_third = self.dut.new_session() - self.pmdout = PmdOutput(self.dut) - self.pmdout_2 = PmdOutput(self.dut, self.session_secondary) - self.pmdout_3 = PmdOutput(self.dut, self.session_third) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.session_secondary = self.sut_node.new_session() + self.session_third = self.sut_node.new_session() + self.pmdout = PmdOutput(self.sut_node) + self.pmdout_2 = PmdOutput(self.sut_node, self.session_secondary) + self.pmdout_3 = PmdOutput(self.sut_node, 
self.session_third) self.pf_kdriver_flag = 0 self.pf_ddriver_flag = 0 @@ -162,22 +162,22 @@ class TestVEBSwitching(TestCase): self.vf2_mac = "00:11:22:33:44:13" self.vf3_mac = "00:11:22:33:44:14" - self.used_dut_port = self.dut_ports[0] - localPort = self.tester.get_local_port(self.dut_ports[0]) - self.tester_itf = self.tester.get_interface(localPort) - self.pf_interface = self.dut.ports_info[self.used_dut_port]["intf"] - self.pf_mac_address = self.dut.get_mac_address(0) - self.pf_pci = self.dut.ports_info[self.used_dut_port]["pci"] + self.used_sut_port = self.sut_ports[0] + localPort = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_itf = self.tg_node.get_interface(localPort) + self.pf_interface = self.sut_node.ports_info[self.used_sut_port]["intf"] + self.pf_mac_address = self.sut_node.get_mac_address(0) + self.pf_pci = self.sut_node.ports_info[self.used_sut_port]["pci"] - self.dut.init_reserved_core() - self.cores_vf0 = self.dut.get_reserved_core("2C", 0) - self.cores_vf1 = self.dut.get_reserved_core("2C", 0) + self.sut_node.init_reserved_core() + self.cores_vf0 = self.sut_node.get_reserved_core("2C", 0) + self.cores_vf1 = self.sut_node.get_reserved_core("2C", 0) def set_up(self): """ This is to clear up environment before the case run. """ - self.dut.kill_all() + self.sut_node.kill_all() def setup_env(self, driver): """ @@ -185,18 +185,18 @@ class TestVEBSwitching(TestCase): kernel driver or dpdk driver. 
""" if driver == "default": - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2, driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2, driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] if driver == "default": - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.pf_interface, self.vf0_mac), "# ", 3, ) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 1 mac %s" % (self.pf_interface, self.vf1_mac), "# ", 3, @@ -223,9 +223,9 @@ class TestVEBSwitching(TestCase): time.sleep(2) self.session_secondary.send_expect("quit", "# ") time.sleep(2) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) if driver == self.drivername: self.pf_ddriver_flag = 0 else: @@ -244,18 +244,18 @@ class TestVEBSwitching(TestCase): ports=[self.sriov_vfs_port[0].pci], param="--eth-peer=0,%s" % self.vf1_mac, ) - self.dut.send_expect("set fwd txonly", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set fwd txonly", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") self.pmdout_2.start_testpmd( self.cores_vf1, prefix="test2", ports=[self.sriov_vfs_port[1].pci] ) self.session_secondary.send_expect("set fwd rxonly", "testpmd>") self.session_secondary.send_expect("set promisc all off", "testpmd>") self.session_secondary.send_expect("start", "testpmd>", 5) - self.dut.send_expect("start", "testpmd>", 5) + self.sut_node.send_expect("start", "testpmd>", 5) time.sleep(2) - self.dut.send_expect("stop", 
"testpmd>", 5) + self.sut_node.send_expect("stop", "testpmd>", 5) self.session_secondary.send_expect("stop", "testpmd>", 5) vf0_tx_stats = self.veb_get_pmd_stats("first", 0, "tx") @@ -279,9 +279,9 @@ class TestVEBSwitching(TestCase): ports=[self.sriov_vfs_port[0].pci], param="--eth-peer=0,%s" % self.vf1_mac, ) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.pmdout_2.start_testpmd( "Default", prefix="test2", ports=[self.sriov_vfs_port[1].pci] @@ -291,9 +291,9 @@ class TestVEBSwitching(TestCase): self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) - self.send_packet(self.vf0_mac, self.tester_itf) + self.send_packet(self.vf0_mac, self.tg_itf) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) self.session_secondary.send_expect("stop", "testpmd>", 2) vf0_tx_stats = self.veb_get_pmd_stats("first", 0, "tx") @@ -314,17 +314,17 @@ class TestVEBSwitching(TestCase): """ self.setup_env(driver="default") # the two vfs belongs to different vlans - self.dut.send_expect("ip link set %s vf 0 vlan 1" % self.pf_interface, "# ", 1) - self.dut.send_expect("ip link set %s vf 1 vlan 2" % self.pf_interface, "# ", 1) + self.sut_node.send_expect("ip link set %s vf 0 vlan 1" % self.pf_interface, "# ", 1) + self.sut_node.send_expect("ip link set %s vf 1 vlan 2" % self.pf_interface, "# ", 1) self.pmdout.start_testpmd( "Default", prefix="test1", ports=[self.sriov_vfs_port[0].pci], param="--eth-peer=0,%s" % self.vf1_mac, ) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + 
self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.pmdout_2.start_testpmd( "Default", prefix="test2", ports=[self.sriov_vfs_port[1].pci] @@ -334,9 +334,9 @@ class TestVEBSwitching(TestCase): self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) - self.send_packet(self.vf0_mac, self.tester_itf, "vlan") + self.send_packet(self.vf0_mac, self.tg_itf, "vlan") - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) self.session_secondary.send_expect("stop", "testpmd>", 2) vf0_tx_stats = self.veb_get_pmd_stats("first", 0, "tx") @@ -346,22 +346,22 @@ class TestVEBSwitching(TestCase): (vf0_tx_stats[0] == 1) and (vf1_rx_stats[0] == 0), "VF1 received packets from VF0, the vlan filter doen't work", ) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) self.session_secondary.send_expect("quit", "# ") time.sleep(2) # the two vfs belongs to the same vlan - self.dut.send_expect("ip link set %s vf 1 vlan 1" % self.pf_interface, "# ", 1) + self.sut_node.send_expect("ip link set %s vf 1 vlan 1" % self.pf_interface, "# ", 1) self.pmdout.start_testpmd( "Default", prefix="test1", ports=[self.sriov_vfs_port[0].pci], param="--eth-peer=0,%s" % self.vf1_mac, ) - self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.pmdout_2.start_testpmd( "Default", prefix="test2", ports=[self.sriov_vfs_port[1].pci] @@ -371,9 +371,9 @@ class TestVEBSwitching(TestCase): self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) - self.send_packet(self.vf0_mac, self.tester_itf, "vlan") + self.send_packet(self.vf0_mac, self.tg_itf, "vlan") - 
self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) self.session_secondary.send_expect("stop", "testpmd>", 2) vf0_tx_stats = self.veb_get_pmd_stats("first", 0, "tx") @@ -394,10 +394,10 @@ class TestVEBSwitching(TestCase): # VF->PF self.setup_env(driver=self.drivername) self.pmdout.start_testpmd("Default", prefix="test1", ports=[self.pf_pci]) - self.dut.send_expect("set fwd rxonly", "testpmd>") - self.dut.send_expect("set verbose 1", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd rxonly", "testpmd>") + self.sut_node.send_expect("set verbose 1", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.pmdout_2.start_testpmd( "Default", @@ -413,9 +413,9 @@ class TestVEBSwitching(TestCase): self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) # print the packets which received by pf, this caused most packets missed. 
- out = self.dut.get_session_output(timeout=1) + out = self.sut_node.get_session_output(timeout=1) self.session_secondary.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) count_pkt = self.count_packet(out, self.vf0_mac) vf0_tx_stats = self.veb_get_pmd_stats("second", 0, "tx") @@ -424,7 +424,7 @@ class TestVEBSwitching(TestCase): self.verify(count_pkt > 100, "no packet was received by PF") self.session_secondary.send_expect("quit", "# ") time.sleep(2) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) # PF->VF @@ -434,8 +434,8 @@ class TestVEBSwitching(TestCase): ports=[self.pf_pci], param="--eth-peer=0,%s" % self.vf0_mac, ) - self.dut.send_expect("set fwd txonly", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set fwd txonly", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") self.pmdout_2.start_testpmd( "Default", prefix="test2", ports=[self.sriov_vfs_port[0].pci] @@ -447,9 +447,9 @@ class TestVEBSwitching(TestCase): self.session_secondary.send_expect("set promisc all off", "testpmd>") self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) self.session_secondary.send_expect("stop", "testpmd>", 2) vf0_rx_stats = self.veb_get_pmd_stats("second", 0, "rx") @@ -461,14 +461,14 @@ class TestVEBSwitching(TestCase): self.verify(vf0_rx_stats[0] > 100, "no packet was received by VF0") self.session_secondary.send_expect("quit", "# ") time.sleep(2) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) - # tester->VF + # TG->VF self.pmdout.start_testpmd("Default", prefix="test1", ports=[self.pf_pci]) - 
self.dut.send_expect("set fwd mac", "testpmd>") - self.dut.send_expect("set promisc all off", "testpmd>") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("set fwd mac", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.pmdout_2.start_testpmd( @@ -483,27 +483,27 @@ class TestVEBSwitching(TestCase): time.sleep(2) vf0_start_rx_stats = self.veb_get_pmd_stats("second", 0, "rx") - self.send_packet(self.vf0_mac, self.tester_itf) + self.send_packet(self.vf0_mac, self.tg_itf) time.sleep(2) self.session_secondary.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) vf0_end_rx_stats = self.veb_get_pmd_stats("second", 0, "rx") self.verify( vf0_end_rx_stats[0] - vf0_start_rx_stats[0] == 1, "no packet was received by VF0", ) - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("start", "testpmd>") time.sleep(2) self.session_secondary.send_expect("start", "testpmd>") time.sleep(2) vf0_start_rx_stats = self.veb_get_pmd_stats("second", 0, "rx") pf_start_tx_stats = self.veb_get_pmd_stats("first", 0, "tx") pf_start_rx_stats = self.veb_get_pmd_stats("first", 0, "rx") - self.send_packet(self.pf_mac_address, self.tester_itf) + self.send_packet(self.pf_mac_address, self.tg_itf) time.sleep(2) self.session_secondary.send_expect("stop", "testpmd>", 2) - self.dut.send_expect("stop", "testpmd>", 2) + self.sut_node.send_expect("stop", "testpmd>", 2) vf0_end_rx_stats = self.veb_get_pmd_stats("second", 0, "rx") pf_end_tx_stats = self.veb_get_pmd_stats("first", 0, "tx") pf_end_rx_stats = self.veb_get_pmd_stats("first", 0, "rx") @@ -520,12 +520,12 @@ class TestVEBSwitching(TestCase): ) self.session_secondary.send_expect("quit", "# ") time.sleep(2) - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") time.sleep(2) # VF1->VF2 self.pmdout.start_testpmd("Default", 
prefix="test1", ports=[self.pf_pci]) - self.dut.send_expect("set promisc all off", "testpmd>") + self.sut_node.send_expect("set promisc all off", "testpmd>") self.pmdout_2.start_testpmd( self.cores_vf0, @@ -567,16 +567,16 @@ class TestVEBSwitching(TestCase): if self.pf_ddriver_flag == 1: self.destroy_env(driver=self.drivername) - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. """ - self.dut.kill_all() - self.dut.close_session(self.session_secondary) - self.dut.close_session(self.session_third) - # Marvin recommended that all the dut ports should be bound to DPDK. - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + self.sut_node.kill_all() + self.sut_node.close_session(self.session_secondary) + self.sut_node.close_session(self.session_third) + # Marvin recommended that all the SUT ports should be bound to DPDK. + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver(driver=self.drivername) diff --git a/tests/TestSuite_vf_daemon.py b/tests/TestSuite_vf_daemon.py index aed4ab93..183b53f8 100644 --- a/tests/TestSuite_vf_daemon.py +++ b/tests/TestSuite_vf_daemon.py @@ -9,9 +9,9 @@ import time from scapy.utils import rdpcap import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput from framework.qemu_kvm import QEMUKvm +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import get_nic_name from framework.test_case import TestCase @@ -23,8 +23,8 @@ class TestVfDaemon(TestCase): supported_vf_driver = ["pci-stub", "vfio-pci"] def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.vm0 = None self.vm1 = None self.env_done = False @@ -37,20 +37,20 @@ class 
TestVfDaemon(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") def set_up(self): self.setup_vm_env() def check_vf_link_status(self): - self.dut_testpmd.start_testpmd("Default", "--port-topology=chained") + self.sut_testpmd.start_testpmd("Default", "--port-topology=chained") self.vm0_testpmd.start_testpmd(VM_CORES_MASK, "--port-topology=chained") for i in range(10): out = self.vm0_testpmd.execute_cmd("show port info 0") print(out) if "Link status: down" in out: - self.dut_testpmd.execute_cmd("port stop all") - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port start all") time.sleep(2) else: break @@ -63,102 +63,102 @@ class TestVfDaemon(TestCase): if self.env_done: return - self.bind_nic_driver(self.dut_ports[:1], driver="igb_uio") - self.used_dut_port = self.dut_ports[0] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) + self.bind_nic_driver(self.sut_ports[:1], driver="igb_uio") + self.used_sut_port = self.sut_ports[0] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) time.sleep(1) - self.dut_testpmd = PmdOutput(self.dut) - self.dut_testpmd.start_testpmd( + self.sut_testpmd = PmdOutput(self.sut_node) + self.sut_testpmd.start_testpmd( "Default", "--rxq=4 --txq=4 --port-topology=chained" ) - self.dut_testpmd.execute_cmd("start") + 
self.sut_testpmd.execute_cmd("start") time.sleep(5) vf0_prop = {"opt_host": self.sriov_vfs_port[0].pci} # set up VM0 ENV - self.vm0 = QEMUKvm(self.dut, "vm0", "vf_daemon") + self.vm0 = QEMUKvm(self.sut_node, "vm0", "vf_daemon") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) try: - self.vm0_dut = self.vm0.start() - if self.vm0_dut is None: + self.vm0_sut = self.vm0.start() + if self.vm0_sut is None: raise Exception("Set up VM0 ENV failed!") except Exception as e: self.destroy_vm_env() raise Exception(e) - self.vm0_dut_ports = self.vm0_dut.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm0_dut) + self.vm0_sut_ports = self.vm0_sut.get_ports("any") + self.vm0_testpmd = PmdOutput(self.vm0_sut) vf1_prop = {"opt_host": self.sriov_vfs_port[1].pci} if self.running_case != "test_vf_mtu": - self.vm1 = QEMUKvm(self.dut, "vm1", "vf_daemon") + self.vm1 = QEMUKvm(self.sut_node, "vm1", "vf_daemon") self.vm1.set_vm_device(driver=self.vf_assign_method, **vf1_prop) try: - self.vm1_dut = self.vm1.start() - if self.vm1_dut is None: + self.vm1_sut = self.vm1.start() + if self.vm1_sut is None: raise Exception("Set up VM1 ENV failed!") except Exception as e: self.destroy_vm_env() raise Exception(e) - self.vm1_dut_ports = self.vm1_dut.get_ports("any") - self.vm1_testpmd = PmdOutput(self.vm1_dut) + self.vm1_sut_ports = self.vm1_sut.get_ports("any") + self.vm1_testpmd = PmdOutput(self.vm1_sut) self.env_done = True - self.dut_testpmd.quit() + self.sut_testpmd.quit() def destroy_vm_env(self): if getattr(self, "vm0", None): - self.vm0_dut.kill_all() + self.vm0_sut.kill_all() self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() self.vm0 = None if getattr(self, "vm1", None): - self.vm1_dut.kill_all() + self.vm1_sut.kill_all() self.vm1_testpmd = None - self.vm1_dut_ports = None + self.vm1_sut_ports = None # destroy vm1 self.vm1.stop() self.vm1 = None - if getattr(self, "used_dut_port", None): - 
self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] - self.used_dut_port = None + if getattr(self, "used_sut_port", None): + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] + self.used_sut_port = None self.env_done = False def send_packet(self, dst_mac, vlan_id, pktsize, num): """ - Generate packets and send them to dut + Generate packets and send them to SUT """ if vlan_id == 0: - pkt = Packet(pkt_type="UDP", pkt_len=pktsize) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=pktsize) else: - pkt = Packet(pkt_type="VLAN_UDP", pkt_len=pktsize) - pkt.config_layer("vlan", {"vlan": vlan_id}) - pkt.config_layer("ether", {"dst": dst_mac}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP", pkt_len=pktsize) + scapy_pkt_builder.config_layer("vlan", {"vlan": vlan_id}) + scapy_pkt_builder.config_layer("ether", {"dst": dst_mac}) - inst = self.tester.tcpdump_sniff_packets(self.tester_intf) - pkt.send_pkt(self.tester, tx_port=self.tester_intf, count=num) + inst = self.tg_node.tcpdump_sniff_packets(self.tg_intf) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf, count=num) return inst def strip_mac(self, inst, element="src"): """ Load sniff packets, strip and return mac address from dump message """ - pkts = self.tester.load_tcpdump_sniff_packets(inst) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst) macs = [] for i in range(len(pkts)): mac = pkts.strip_element_layer2(element, p_index=i) @@ -169,7 +169,7 @@ class TestVfDaemon(TestCase): """ Load sniff packets, strip and return vlan id from dump message """ - pkts = self.tester.load_tcpdump_sniff_packets(inst) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst) vlans = [] for i in range(len(pkts)): vlan = pkts.strip_element_vlan("vlan", p_index=i) @@ -178,17 +178,17 @@ class TestVfDaemon(TestCase): def send_and_pmdout(self, dst_mac, vlan_id=0, 
pktsize=64, num=1): """ - Send packets to dut and return testpmd output message + Send packets to SUT and return testpmd output message Input: dst_mac, vlan_id, packet size, packet number Output: testpmd output message """ inst = self.send_packet(dst_mac, vlan_id, pktsize, num) - out = self.vm0_dut.get_session_output(timeout=10) + out = self.vm0_sut.get_session_output(timeout=10) return out def send_and_vlanstrip(self, dst_mac, vlan_id=0, pktsize=64, num=1): """ - Send packets to dut, strip and return vlan id from dump message + Send packets to SUT, strip and return vlan id from dump message Input: dst_mac, vlan_id, packet size, packet number Output: vlan id stripped from dump message """ @@ -198,7 +198,7 @@ class TestVfDaemon(TestCase): def send_and_macstrip(self, dst_mac, vlan_id=0, pktsize=64, num=1): """ - Send packets to dut, strip and return src/dst mac from dump message + Send packets to SUT, strip and return src/dst mac from dump message Input: dst_mac, vlan_id, packet size, packet number Output: src/dst mac stripped from dump message """ @@ -221,7 +221,7 @@ class TestVfDaemon(TestCase): self.vm0_testpmd.execute_cmd("start") # Disable vlan insert which means insert vlan id as 0 rx_vlan = 0 - self.dut_testpmd.execute_cmd("set vf vlan insert 0 0 %s" % rx_vlan) + self.sut_testpmd.execute_cmd("set vf vlan insert 0 0 %s" % rx_vlan) time.sleep(3) vlans = self.send_and_vlanstrip(self.vf0_mac) self.verify(rx_vlan not in vlans, "Failed to disable vlan insert!!!") @@ -230,7 +230,7 @@ class TestVfDaemon(TestCase): random_vlan = random.randint(1, MAX_VLAN - 1) rx_vlans = [1, random_vlan, MAX_VLAN] for rx_vlan in rx_vlans: - self.dut_testpmd.execute_cmd("set vf vlan insert 0 0 %s" % rx_vlan) + self.sut_testpmd.execute_cmd("set vf vlan insert 0 0 %s" % rx_vlan) time.sleep(3) vlans = self.send_and_vlanstrip(self.vf0_mac) self.verify(rx_vlan in vlans, "Failed to enable vlan insert packet!!!") @@ -247,8 +247,8 @@ class TestVfDaemon(TestCase): 
self.vm0_testpmd.execute_cmd("set fwd rxonly") self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("start") - self.dut_testpmd.execute_cmd("set vf promisc 0 0 off") - self.dut_testpmd.execute_cmd("set vf allmulti 0 0 off") + self.sut_testpmd.execute_cmd("set vf promisc 0 0 off") + self.sut_testpmd.execute_cmd("set vf allmulti 0 0 off") multi_mac = "F3:00:33:22:11:00" out = self.send_and_pmdout(multi_mac) self.verify("received" not in out, "Failed to disable vf multicast mode!!!") @@ -259,7 +259,7 @@ class TestVfDaemon(TestCase): "dst=%s" % self.vf0_mac in out, "Failed to disable vf multicast mode!!!" ) - self.dut_testpmd.execute_cmd("set vf allmulti 0 0 on") + self.sut_testpmd.execute_cmd("set vf allmulti 0 0 on") out = self.send_and_pmdout(multi_mac) self.verify("received" in out, "Failed to enable vf multicast mode!!!") self.verify( @@ -284,7 +284,7 @@ class TestVfDaemon(TestCase): self.vm0_testpmd.execute_cmd("set fwd rxonly") self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("start") - self.dut_testpmd.execute_cmd("set vf promisc 0 0 off") + self.sut_testpmd.execute_cmd("set vf promisc 0 0 off") wrong_mac = "9E:AC:72:49:43:11" time.sleep(10) @@ -297,7 +297,7 @@ class TestVfDaemon(TestCase): "dst=%s" % self.vf0_mac in out, "Failed to disable vf promisc mode!!!" 
) - self.dut_testpmd.execute_cmd("set vf promisc 0 0 on") + self.sut_testpmd.execute_cmd("set vf promisc 0 0 on") time.sleep(5) out = self.send_and_pmdout(wrong_mac) self.verify("received" in out, "Failed to enable vf promisc mode!!!") @@ -321,13 +321,13 @@ class TestVfDaemon(TestCase): self.vm0_testpmd.execute_cmd("set fwd rxonly") self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("start") - self.dut_testpmd.execute_cmd("set vf broadcast 0 0 off") + self.sut_testpmd.execute_cmd("set vf broadcast 0 0 off") dst_mac = "FF:FF:FF:FF:FF:FF" out = self.send_and_pmdout(dst_mac) self.verify("received" not in out, "Failed to disable vf broadcast mode!!!") - self.dut_testpmd.execute_cmd("set vf broadcast 0 0 on") + self.sut_testpmd.execute_cmd("set vf broadcast 0 0 on") # the config not effective immediately. time.sleep(10) @@ -339,13 +339,13 @@ class TestVfDaemon(TestCase): """ Enable VF MTU change """ - self.tester.send_expect("ifconfig %s mtu 9000" % self.tester_intf, "#") + self.tg_node.send_expect("ifconfig %s mtu 9000" % self.tg_intf, "#") self.check_vf_link_status() time.sleep(10) - self.dut_testpmd.execute_cmd("port stop all") - self.dut_testpmd.execute_cmd("port config mtu 0 9000") - self.dut_testpmd.execute_cmd("port start all") - out = self.dut_testpmd.execute_cmd("show port info 0") + self.sut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port config mtu 0 9000") + self.sut_testpmd.execute_cmd("port start all") + out = self.sut_testpmd.execute_cmd("show port info 0") self.verify("MTU: 9000" in out, "DPDK PF SET MTU FAILED!") self.vf0_mac = self.vm0_testpmd.get_port_mac(0) @@ -373,8 +373,8 @@ class TestVfDaemon(TestCase): ) self.vm0_testpmd.quit() - self.dut_testpmd.quit() - self.tester.send_expect("ifconfig %s mtu 1500" % self.tester_intf, "#") + self.sut_testpmd.quit() + self.tg_node.send_expect("ifconfig %s mtu 1500" % self.tg_intf, "#") def test_vlan_tag(self): """ @@ -393,14 +393,14 @@ class 
TestVfDaemon(TestCase): self.vm0_testpmd.execute_cmd("start") for rx_vlan in rx_vlans: self.vm0_testpmd.execute_cmd("rx_vlan add %s 0" % rx_vlan) - self.dut_testpmd.execute_cmd("set vf vlan tag 0 0 off") + self.sut_testpmd.execute_cmd("set vf vlan tag 0 0 off") time.sleep(3) out = self.send_and_macstrip(self.vf0_mac, rx_vlan) self.verify( self.vf0_mac.lower() not in out, "Failed to disable vlan tag!!!" ) - self.dut_testpmd.execute_cmd("set vf vlan tag 0 0 on") + self.sut_testpmd.execute_cmd("set vf vlan tag 0 0 on") time.sleep(3) out = self.send_and_macstrip(self.vf0_mac, rx_vlan) self.verify(self.vf0_mac.lower() in out, "Failed to enable vlan tag!!!") @@ -420,10 +420,10 @@ class TestVfDaemon(TestCase): self.vm0_testpmd.execute_cmd("set fwd rxonly") self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("start") - self.dut_testpmd.execute_cmd("set tx loopback 0 off") + self.sut_testpmd.execute_cmd("set tx loopback 0 off") time.sleep(5) - inst = self.tester.tcpdump_sniff_packets(self.tester_intf) + inst = self.tg_node.tcpdump_sniff_packets(self.tg_intf) self.vm1_testpmd.execute_cmd("set burst 5") self.vm1_testpmd.execute_cmd("start tx_first") @@ -434,10 +434,10 @@ class TestVfDaemon(TestCase): self.verify("RX-packets: 0" in out, "Failed to disable tx loopback!!!") self.vm0_testpmd.execute_cmd("start") - self.dut_testpmd.execute_cmd("set tx loopback 0 on") + self.sut_testpmd.execute_cmd("set tx loopback 0 on") time.sleep(3) - inst = self.tester.tcpdump_sniff_packets(self.tester_intf) + inst = self.tg_node.tcpdump_sniff_packets(self.tg_intf) self.vm1_testpmd.execute_cmd("stop") self.vm1_testpmd.execute_cmd("start tx_first") @@ -460,7 +460,7 @@ class TestVfDaemon(TestCase): self.vm0_testpmd.execute_cmd("set fwd rxonly") self.vm0_testpmd.execute_cmd("set verbose 1") self.vm0_testpmd.execute_cmd("start") - self.dut_testpmd.execute_cmd("set all queues drop 0 off") + self.sut_testpmd.execute_cmd("set all queues drop 0 off") self.vf0_mac = 
self.vm0_testpmd.get_port_mac(0) self.vf1_mac = self.vm1_testpmd.get_port_mac(0) out = self.send_and_pmdout(self.vf1_mac, 0, 64, 2000) @@ -469,7 +469,7 @@ class TestVfDaemon(TestCase): out = self.send_and_pmdout(self.vf0_mac, 0, 64, 20) out = self.vm0_testpmd.execute_cmd("show port stats 0") self.verify("RX-packets: 0" in out, "Failed to disable all queues drop!!!") - self.dut_testpmd.execute_cmd("set all queues drop 0 on") + self.sut_testpmd.execute_cmd("set all queues drop 0 on") out = self.vm0_testpmd.execute_cmd("show port stats 0") self.verify("RX-packets: 20" in out, "Failed to enable all queues drop!!!") out = self.send_and_pmdout(self.vf0_mac, 0, 64, 20) @@ -483,7 +483,7 @@ class TestVfDaemon(TestCase): expect_mac = "A2:22:33:44:55:66" self.check_vf_link_status() self.vm0_testpmd.quit() - self.dut_testpmd.execute_cmd("set vf mac addr 0 0 %s" % expect_mac) + self.sut_testpmd.execute_cmd("set vf mac addr 0 0 %s" % expect_mac) time.sleep(5) out = self.vm0_testpmd.start_testpmd(VM_CORES_MASK, "--port-topology=chained") @@ -510,20 +510,20 @@ class TestVfDaemon(TestCase): unmatch_vlan = (random_vlan + 2) % 4096 self.vm0_testpmd.execute_cmd("set fwd mac") self.vm0_testpmd.execute_cmd("start") - # self.dut_testpmd.execute_cmd('rx_vlan add %d port 0 vf 1' % match_vlan) + # self.sut_testpmd.execute_cmd('rx_vlan add %d port 0 vf 1' % match_vlan) self.vm0_testpmd.execute_cmd("vlan set filter on 0") self.vm0_testpmd.execute_cmd("rx_vlan add %d 0" % match_vlan) if self.kdriver == "i40e": - self.dut_testpmd.execute_cmd("set vf vlan stripq 0 0 off") + self.sut_testpmd.execute_cmd("set vf vlan stripq 0 0 off") else: # Since dpdk18.02 commit 8b9bd0, testpmd vlan filter was disabled by default # But some pmds still enabled it, so enable and disable vlan filter again - self.dut_testpmd.execute_cmd("vlan set filter on 0") - self.dut_testpmd.execute_cmd("vlan set filter off 0") - self.dut_testpmd.execute_cmd("vlan set strip off 0") + self.sut_testpmd.execute_cmd("vlan set filter 
on 0") + self.sut_testpmd.execute_cmd("vlan set filter off 0") + self.sut_testpmd.execute_cmd("vlan set strip off 0") self.vm0_testpmd.execute_cmd("vlan set strip off 0") - self.dut_testpmd.execute_cmd("set vf vlan antispoof 0 0 off") + self.sut_testpmd.execute_cmd("set vf vlan antispoof 0 0 off") time.sleep(10) out = self.send_and_macstrip(self.vf0_mac, match_vlan) self.verify( @@ -540,8 +540,8 @@ class TestVfDaemon(TestCase): ) if self.kdriver == "ixgbe": - self.dut_testpmd.execute_cmd("set vf mac antispoof 0 0 on") - self.dut_testpmd.execute_cmd("set vf vlan antispoof 0 0 on") + self.sut_testpmd.execute_cmd("set vf mac antispoof 0 0 on") + self.sut_testpmd.execute_cmd("set vf vlan antispoof 0 0 on") time.sleep(3) out = self.send_and_macstrip(self.vf0_mac, match_vlan) @@ -575,12 +575,12 @@ class TestVfDaemon(TestCase): for rx_vlan in rx_vlans: self.vm0_testpmd.execute_cmd("vlan set filter on 0") self.vm0_testpmd.execute_cmd("rx_vlan add %s 0" % rx_vlan) - self.dut_testpmd.execute_cmd("set vf vlan stripq 0 0 off") + self.sut_testpmd.execute_cmd("set vf vlan stripq 0 0 off") time.sleep(3) out = self.send_and_vlanstrip(self.vf0_mac, rx_vlan) self.verify(rx_vlan in out, "Failed to disable strip vlan!!!") - self.dut_testpmd.execute_cmd("set vf vlan stripq 0 0 on") + self.sut_testpmd.execute_cmd("set vf vlan stripq 0 0 on") time.sleep(3) out = self.send_and_vlanstrip(self.vf0_mac, rx_vlan) self.verify(rx_vlan not in out, "Failed to disable strip vlan!!!") @@ -615,7 +615,7 @@ class TestVfDaemon(TestCase): random_vlan = random.randint(2, MAX_VLAN - 1) rx_vlans = [1, random_vlan, MAX_VLAN] for rx_vlan in rx_vlans: - self.dut_testpmd.execute_cmd("rx_vlan add %s port 0 vf 1" % rx_vlan) + self.sut_testpmd.execute_cmd("rx_vlan add %s port 0 vf 1" % rx_vlan) time.sleep(5) out = self.send_and_pmdout(wrong_mac, rx_vlan) @@ -632,7 +632,7 @@ class TestVfDaemon(TestCase): self.verify( "dst=%s" % wrong_mac not in out, "Failed to enable vlan filter!!!" 
) - self.dut_testpmd.execute_cmd("rx_vlan rm %s port 0 vf 1" % rx_vlan) + self.sut_testpmd.execute_cmd("rx_vlan rm %s port 0 vf 1" % rx_vlan) time.sleep(3) out = self.send_and_pmdout(wrong_mac, rx_vlan) self.verify("dst=%s" % wrong_mac in out, "Failed to disable vlan filter!!!") @@ -655,7 +655,7 @@ class TestVfDaemon(TestCase): """ Enable jumbo frame for VF by configuring DPDK PF. """ - self.tester.send_expect("ifconfig %s mtu 9000" % self.tester_intf, "#") + self.tg_node.send_expect("ifconfig %s mtu 9000" % self.tg_intf, "#") self.check_vf_link_status() time.sleep(10) self.vf0_mac = self.vm0_testpmd.get_port_mac(0) @@ -694,8 +694,8 @@ class TestVfDaemon(TestCase): self.verify("received" not in out, "Failed to receive this length packet!!!") self.vm0_testpmd.quit() - self.dut_testpmd.quit() - self.tester.send_expect("ifconfig %s mtu 1500" % self.tester_intf, "#") + self.sut_testpmd.quit() + self.tg_node.send_expect("ifconfig %s mtu 1500" % self.tg_intf, "#") def test_stats_show_clear(self): """ @@ -703,7 +703,7 @@ class TestVfDaemon(TestCase): """ self.check_vf_link_status() self.vf0_mac = self.vm0_testpmd.get_port_mac(0) - out = self.dut_testpmd.execute_cmd("show vf stats 0 0") + out = self.sut_testpmd.execute_cmd("show vf stats 0 0") self.verify( "RX-packets: 0" in out and "TX-packets: 0" in out, "Fail to show VF RX and TX stats from PF", @@ -720,7 +720,7 @@ class TestVfDaemon(TestCase): self.send_packet(self.vf0_mac, 0, 64, 10) - out = self.dut_testpmd.execute_cmd("show vf stats 0 0") + out = self.sut_testpmd.execute_cmd("show vf stats 0 0") self.verify( "RX-packets: 10" in out and "TX-packets: 10" in out, "Wrong to show VF RX and TX packets from PF", @@ -731,8 +731,8 @@ class TestVfDaemon(TestCase): "Wrong to show VF RX and TX stats", ) - self.dut_testpmd.execute_cmd("clear vf stats 0 0") - out = self.dut_testpmd.execute_cmd("show vf stats 0 0") + self.sut_testpmd.execute_cmd("clear vf stats 0 0") + out = self.sut_testpmd.execute_cmd("show vf stats 0 0") 
self.verify( "RX-packets: 0" in out and "TX-packets: 0" in out, "Fail to clear VF RX and TX stats from PF", @@ -747,11 +747,11 @@ class TestVfDaemon(TestCase): self.vm0_testpmd.quit() if self.running_case != "test_vf_mtu": self.vm1_testpmd.quit() - self.dut_testpmd.quit() + self.sut_testpmd.quit() time.sleep(3) - self.vm0_dut.kill_all() + self.vm0_sut.kill_all() if self.running_case != "test_vf_mtu": - self.vm1_dut.kill_all() + self.vm1_sut.kill_all() def tear_down_all(self): self.destroy_vm_env() diff --git a/tests/TestSuite_vf_interrupt_pmd.py b/tests/TestSuite_vf_interrupt_pmd.py index 5854cb0c..559b6b15 100644 --- a/tests/TestSuite_vf_interrupt_pmd.py +++ b/tests/TestSuite_vf_interrupt_pmd.py @@ -12,7 +12,7 @@ import re import time import framework.utils as utils -from framework.packet import Packet +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.virt_common import VM @@ -24,27 +24,27 @@ class TestVfInterruptPmd(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") self.env_done = False cores = "1S/4C/1T" self.number_of_ports = 1 - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) ports = [] for port in range(self.number_of_ports): - ports.append(self.dut_ports[port]) - self.core_list = self.dut.get_core_list(cores, socket=self.ports_socket) + ports.append(self.sut_ports[port]) + self.core_list = self.sut_node.get_core_list(cores, socket=self.ports_socket) self.core_user = self.core_list[0] self.port_mask = utils.create_mask(ports) self.core_mask_user = utils.create_mask(self.core_list[0:1]) - testport_0 = self.tester.get_local_port(self.dut_ports[0]) - self.rx_intf_0 = self.tester.get_interface(testport_0) - self.tester_mac = self.tester.get_mac(testport_0) + testport_0 = self.tg_node.get_local_port(self.sut_ports[0]) + self.rx_intf_0 = self.tg_node.get_interface(testport_0) + self.tg_mac = self.tg_node.get_mac(testport_0) self.vf0_mac = "00:12:34:56:78:01" self.vf_mac = "00:12:34:56:78:02" - self.mac_port_0 = self.dut.get_mac_address(self.dut_ports[0]) + self.mac_port_0 = self.sut_node.get_mac_address(self.sut_ports[0]) self.queues = 1 self.vf_driver = "vfio-pci" self.vf_assign_method = "vfio-pci" @@ -56,55 +56,55 @@ class TestVfInterruptPmd(TestCase): """ Run before each test case. 
""" - self.dut.restore_interfaces() + self.sut_node.restore_interfaces() - def prepare_l3fwd_power(self, use_dut): + def prepare_l3fwd_power(self, use_sut): """ Compile dpdk-l3fwd-power """ - out = use_dut.build_dpdk_apps("./examples/l3fwd-power") - self.path = use_dut.apps_name["l3fwd-power"] + out = use_sut.build_dpdk_apps("./examples/l3fwd-power") + self.path = use_sut.apps_name["l3fwd-power"] self.verify("Error" not in out, "compilation error") - def send_packet(self, mac, testinterface, use_dut): + def send_packet(self, mac, testinterface, use_sut): """ Send a packet and verify """ - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.send_pkt(self.tester, tx_port=testinterface) - self.out2 = use_dut.get_session_output(timeout=2) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=testinterface) + self.out2 = use_sut.get_session_output(timeout=2) - def send_packet_loop(self, mac, testinterface, use_dut, ip_addr): + def send_packet_loop(self, mac, testinterface, use_sut, ip_addr): """ Send a packet and verify """ - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("ipv4", {"dst": "2.1.1.5", "src": "2.1.1.%s" % ip_addr}) - pkt.send_pkt(self.tester, tx_port=testinterface) - self.out2 = use_dut.get_session_output(timeout=2) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("ipv4", {"dst": "2.1.1.5", "src": "2.1.1.%s" % ip_addr}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=testinterface) + self.out2 = use_sut.get_session_output(timeout=2) def set_NIC_link(self): """ When starting l3fwd-power on vf, ensure that PF link is up """ - self.used_dut_port = self.dut_ports[0] - self.host_intf = 
self.dut.ports_info[self.used_dut_port]["intf"] - self.dut.send_expect("ifconfig %s up" % self.host_intf, "#", 3) + self.used_sut_port = self.sut_ports[0] + self.host_intf = self.sut_node.ports_info[self.used_sut_port]["intf"] + self.sut_node.send_expect("ifconfig %s up" % self.host_intf, "#", 3) - def begin_l3fwd_power(self, use_dut): + def begin_l3fwd_power(self, use_sut): """ begin l3fwd-power """ cmd_vhost_net = ( self.path - + "-n %d -c %s" % (use_dut.get_memory_channels(), self.core_mask_user) + + "-n %d -c %s" % (use_sut.get_memory_channels(), self.core_mask_user) + " -- -P -p 1 --config='(0,0,%s)'" % self.core_user ) try: self.logger.info("Launch l3fwd_sample sample:") - self.out = use_dut.send_expect( + self.out = use_sut.send_expect( cmd_vhost_net, "Checking link statusdone", 60 ) if "Error" in self.out: @@ -116,7 +116,7 @@ class TestVfInterruptPmd(TestCase): "ERROR: Failed to launch l3fwd-power sample: %s" % str(e) ) - def begin_l3fwd_power_multi_queues(self, use_dut): + def begin_l3fwd_power_multi_queues(self, use_sut): """ begin l3fwd-power """ @@ -132,7 +132,7 @@ class TestVfInterruptPmd(TestCase): ) try: self.logger.info("Launch l3fwd_sample sample:") - self.out = use_dut.send_expect( + self.out = use_sut.send_expect( cmd_vhost_net, "Checking link statusdone", 60 ) self.logger.info(self.out) @@ -149,13 +149,13 @@ class TestVfInterruptPmd(TestCase): """ if self.env_done: return - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] - self.host_intf0 = self.dut.ports_info[self.used_dut_port_0]["intf"] + self.host_intf0 = self.sut_node.ports_info[self.used_sut_port_0]["intf"] # set vf mac - 
self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.host_intf0, self.vf0_mac), "# " ) @@ -163,15 +163,15 @@ class TestVfInterruptPmd(TestCase): port.bind_driver(self.vf_driver) vf0_prop_0 = {"opt_host": self.sriov_vfs_port_0[0].pci} - self.vm0 = VM(self.dut, "vm0", "vf_interrupt_pmd") + self.vm0 = VM(self.sut_node, "vm0", "vf_interrupt_pmd") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop_0) try: - self.vm0_dut = self.vm0.start() - if self.vm0_dut is None: + self.vm0_sut = self.vm0.start() + if self.vm0_sut is None: raise Exception("Set up VM ENV failed") else: self.verify( - self.vm0_dut.ports_info[0]["intf"] != "N/A", "Not interface" + self.vm0_sut.ports_info[0]["intf"] != "N/A", "Not interface" ) except Exception as e: self.destroy_vm_env() @@ -184,15 +184,15 @@ class TestVfInterruptPmd(TestCase): destroy vm environment """ if getattr(self, "vm0", None): - self.vm0_dut.kill_all() - self.vm0_dut_ports = None + self.vm0_sut.kill_all() + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() self.vm0 = None - if getattr(self, "used_dut_port_0", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) - self.used_dut_port_0 = None + if getattr(self, "used_sut_port_0", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) + self.used_sut_port_0 = None self.env_done = False @@ -200,34 +200,34 @@ class TestVfInterruptPmd(TestCase): """ Bind VF0 to vfio-pci """ - self.vm0_dut.send_expect("modprobe -r vfio_iommu_type1", "#", 3) - self.vm0_dut.send_expect("modprobe -r vfio", "#", 3) - self.vm0_dut.send_expect("modprobe vfio enable_unsafe_noiommu_mode=1", "#", 3) - self.vm0_dut.send_expect("modprobe vfio-pci", "#", 3) - self.vm0_dut.bind_interfaces_linux(driver="vfio-pci") + self.vm0_sut.send_expect("modprobe -r vfio_iommu_type1", "#", 3) + self.vm0_sut.send_expect("modprobe -r vfio", "#", 3) + self.vm0_sut.send_expect("modprobe vfio enable_unsafe_noiommu_mode=1", "#", 3) + 
self.vm0_sut.send_expect("modprobe vfio-pci", "#", 3) + self.vm0_sut.bind_interfaces_linux(driver="vfio-pci") def test_nic_interrupt_VM_vfio_pci(self): """ Check for interrupts within the VM """ self.setup_vm_env() - self.prepare_l3fwd_power(self.vm0_dut) - self.vm0_dut.send_expect( + self.prepare_l3fwd_power(self.vm0_sut) + self.vm0_sut.send_expect( "ip link set %s vf 0 mac %s" % (self.host_intf0, self.vf0_mac), "# " ) self.VF0_bind_vfio_pci() cores = "1S/1C/1T" - core_list = self.vm0_dut.get_core_list(cores) + core_list = self.vm0_sut.get_core_list(cores) core_user = core_list[0] core_mask_user = utils.create_mask(core_list) cmd = self.path + "-c %s -n %d -- -P -p 0x01 --config='(0,0,%s)'" % ( core_mask_user, - self.vm0_dut.get_memory_channels(), + self.vm0_sut.get_memory_channels(), core_user, ) - self.vm0_dut.send_expect(cmd, "Checking link statusdone", 60) - self.send_packet(self.vf0_mac, self.rx_intf_0, self.vm0_dut) + self.vm0_sut.send_expect(cmd, "Checking link statusdone", 60) + self.send_packet(self.vf0_mac, self.rx_intf_0, self.vm0_sut) self.destroy_vm_env() self.verify( "lcore %s is waked up from rx interrupt on port 0" % core_user in self.out2, @@ -242,20 +242,20 @@ class TestVfInterruptPmd(TestCase): """ Check Interrupt for VF with vfio driver """ - self.prepare_l3fwd_power(self.dut) + self.prepare_l3fwd_power(self.sut_node) self.set_NIC_link() # generate VF and bind to vfio-pci - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] for port in self.sriov_vfs_port_0: port.bind_driver("vfio-pci") # set vf mac - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % 
(self.host_intf, self.vf_mac), "# " ) - self.begin_l3fwd_power(self.dut) - self.send_packet(self.vf_mac, self.rx_intf_0, self.dut) + self.begin_l3fwd_power(self.sut_node) + self.send_packet(self.vf_mac, self.rx_intf_0, self.sut_node) self.verify( "lcore %s is waked up from rx interrupt on port 0" % self.core_user in self.out2, @@ -270,13 +270,13 @@ class TestVfInterruptPmd(TestCase): """ Check Interrupt for PF with vfio-pci driver """ - self.prepare_l3fwd_power(self.dut) + self.prepare_l3fwd_power(self.sut_node) - self.dut.ports_info[0]["port"].bind_driver(driver="vfio-pci") + self.sut_node.ports_info[0]["port"].bind_driver(driver="vfio-pci") - self.begin_l3fwd_power(self.dut) + self.begin_l3fwd_power(self.sut_node) - self.send_packet(self.mac_port_0, self.rx_intf_0, self.dut) + self.send_packet(self.mac_port_0, self.rx_intf_0, self.sut_node) self.verify( "lcore %s is waked up from rx interrupt on port 0" % self.core_user @@ -292,15 +292,15 @@ class TestVfInterruptPmd(TestCase): """ Check Interrupt for PF with igb_uio driver """ - self.prepare_l3fwd_power(self.dut) + self.prepare_l3fwd_power(self.sut_node) - self.dut.setup_modules_linux(self.target, "igb_uio", "") + self.sut_node.setup_modules_linux(self.target, "igb_uio", "") - self.dut.ports_info[0]["port"].bind_driver(driver="igb_uio") + self.sut_node.ports_info[0]["port"].bind_driver(driver="igb_uio") - self.begin_l3fwd_power(self.dut) + self.begin_l3fwd_power(self.sut_node) - self.send_packet(self.mac_port_0, self.rx_intf_0, self.dut) + self.send_packet(self.mac_port_0, self.rx_intf_0, self.sut_node) self.verify( "lcore %s is waked up from rx interrupt on port 0" % self.core_user @@ -330,22 +330,22 @@ class TestVfInterruptPmd(TestCase): "%s nic port not support vf multi-queues interrupt" % str(self.nic), ) self.queues = 4 - self.prepare_l3fwd_power(self.dut) + self.prepare_l3fwd_power(self.sut_node) self.set_NIC_link() # generate VF and bind to vfio-pci - self.used_dut_port_0 = self.dut_ports[0] - 
self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] for port in self.sriov_vfs_port_0: port.bind_driver("vfio-pci") # set vf mac - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.host_intf, self.vf_mac), "# " ) - self.begin_l3fwd_power_multi_queues(self.dut) + self.begin_l3fwd_power_multi_queues(self.sut_node) stroutput = "" for ip in range(2, 10): - self.send_packet_loop(self.vf_mac, self.rx_intf_0, self.dut, ip) + self.send_packet_loop(self.vf_mac, self.rx_intf_0, self.sut_node, ip) stroutput = stroutput + self.out2 for queue in range(self.queues): self.verify( @@ -375,14 +375,14 @@ class TestVfInterruptPmd(TestCase): "%s nic port not support vf multi-queues interrupt" % str(self.nic), ) self.setup_vm_env() - self.vm0_dut.send_expect( + self.vm0_sut.send_expect( "ip link set %s vf 0 mac %s" % (self.host_intf0, self.vf0_mac), "# " ) self.queues = 4 - self.prepare_l3fwd_power(self.vm0_dut) + self.prepare_l3fwd_power(self.vm0_sut) self.VF0_bind_vfio_pci() cores = "1S/4C/1T" - core_list = self.vm0_dut.get_core_list(cores) + core_list = self.vm0_sut.get_core_list(cores) core_mask_user = utils.create_mask(core_list) config_info = "" for queue in range(self.queues): @@ -394,10 +394,10 @@ class TestVfInterruptPmd(TestCase): + "-c %s -n 4 -- -P -p 0x1" % core_mask_user + " --config='%s'" % config_info ) - self.vm0_dut.send_expect(cmd, "Checking link statusdone", 60) + self.vm0_sut.send_expect(cmd, "Checking link statusdone", 60) stroutput = "" for ip in range(2, 10): - self.send_packet_loop(self.vf0_mac, self.rx_intf_0, self.vm0_dut, ip) + self.send_packet_loop(self.vf0_mac, self.rx_intf_0, self.vm0_sut, ip) stroutput = 
stroutput + self.out2 self.destroy_vm_env() for queue in range(self.queues): @@ -414,7 +414,7 @@ class TestVfInterruptPmd(TestCase): """ Run after each test case. """ - self.dut.send_expect( + self.sut_node.send_expect( "killall %s" % self.path.strip().split("/")[-1], "# ", 10, alt_session=True ) diff --git a/tests/TestSuite_vf_jumboframe.py b/tests/TestSuite_vf_jumboframe.py index 42609e32..6260f607 100644 --- a/tests/TestSuite_vf_jumboframe.py +++ b/tests/TestSuite_vf_jumboframe.py @@ -6,8 +6,8 @@ import re import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase from framework.utils import RED @@ -25,18 +25,18 @@ class TestVfJumboFrame(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.vm0 = None self.env_done = False - self.port = self.dut_ports[0] + self.port = self.sut_ports[0] self.vm_port = 0 - cores = self.dut.get_core_list("1S/1C/1T") + cores = self.sut_node.get_core_list("1S/1C/1T") self.port_mask = utils.create_mask([self.port]) # set vf assign method and vf driver - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: self.vf_driver = "pci-stub" @@ -45,11 +45,11 @@ class TestVfJumboFrame(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") - # enable tester mtu - tester_port = self.tester.get_local_port(self.port) - self.netobj = self.tester.ports_info[tester_port]["port"] + 
# enable TG mtu + tg_port = self.tg_node.get_local_port(self.port) + self.netobj = self.tg_node.ports_info[tg_port]["port"] self.netobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU + 100) self.setup_vm_env() @@ -65,17 +65,17 @@ class TestVfJumboFrame(TestCase): return # bind to default driver - self.bind_nic_driver(self.dut_ports[:1], driver="") + self.bind_nic_driver(self.sut_ports[:1], driver="") - self.used_dut_port = self.dut_ports[0] - self.host_intf = self.dut.ports_info[self.used_dut_port]["intf"] - tester_port = self.tester.get_local_port(self.used_dut_port) - self.tester_intf = self.tester.get_interface(tester_port) + self.used_sut_port = self.sut_ports[0] + self.host_intf = self.sut_node.ports_info[self.used_sut_port]["intf"] + tg_port = self.tg_node.get_local_port(self.used_sut_port) + self.tg_intf = self.tg_node.get_interface(tg_port) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] self.vf_mac = "00:10:00:00:00:00" - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.host_intf, self.vf_mac), "# " ) @@ -88,13 +88,13 @@ class TestVfJumboFrame(TestCase): vf_popt = {"opt_host": self.sriov_vfs_port[0].pci} # set up VM ENV - self.vm = VM(self.dut, "vm0", "vf_jumboframe") + self.vm = VM(self.sut_node, "vm0", "vf_jumboframe") self.vm.set_vm_device(driver=self.vf_assign_method, **vf_popt) - self.vm_dut = self.vm.start() - if self.vm_dut is None: + self.vm_sut = self.vm.start() + if self.vm_sut is None: raise Exception("Set up VM ENV failed!") - self.vm_testpmd = PmdOutput(self.vm_dut) + self.vm_testpmd = PmdOutput(self.vm_sut) except Exception as e: self.destroy_vm_env() @@ -104,20 +104,20 @@ class TestVfJumboFrame(TestCase): def destroy_vm_env(self): if 
getattr(self, "vm", None): - if getattr(self, "vm_dut", None): - self.vm_dut.kill_all() + if getattr(self, "vm_sut", None): + self.vm_sut.kill_all() self.vm_testpmd = None - self.vm_dut_ports = None + self.vm_sut_ports = None # destroy vm0 self.vm.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() time.sleep(3) self.vm = None - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - self.used_dut_port = None - self.bind_nic_driver(self.dut_ports[:1], driver="default") + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + self.used_sut_port = None + self.bind_nic_driver(self.sut_ports[:1], driver="default") self.env_done = False @@ -144,11 +144,11 @@ class TestVfJumboFrame(TestCase): int(_) for _ in self.jumboframes_get_stat(self.vm_port, "rx") ] - mac = self.vm_dut.get_mac_address(self.vm_port) + mac = self.vm_sut.get_mac_address(self.vm_port) - pkt = Packet(pkt_type="UDP", pkt_len=pktsize) - pkt.config_layer("ether", {"dst": mac}) - pkt.send_pkt(self.tester, tx_port=self.tester_intf, timeout=30) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=pktsize) + scapy_pkt_builder.config_layer("ether", {"dst": mac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf, timeout=30) time.sleep(1) @@ -191,8 +191,8 @@ class TestVfJumboFrame(TestCase): This case aims to test transmitting normal size packet without jumbo enable """ # should enable jumbo on host - self.dutobj = self.dut.ports_info[self.port]["port"] - self.dutobj.enable_jumbo(framesize=ETHER_STANDARD_MTU) + self.sut_node = self.sut_node.ports_info[self.port]["port"] + self.sut_node.enable_jumbo(framesize=ETHER_STANDARD_MTU) self.vm_testpmd.start_testpmd( "Default", @@ -213,8 +213,8 @@ class TestVfJumboFrame(TestCase): packet forwarding should be support correct. 
""" # should enable jumbo on host - self.dutobj = self.dut.ports_info[self.port]["port"] - self.dutobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU) + self.sut_node = self.sut_node.ports_info[self.port]["port"] + self.sut_node.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU) self.vm_testpmd.start_testpmd( "Default", @@ -235,8 +235,8 @@ class TestVfJumboFrame(TestCase): jumbo frame support. """ # should enable jumbo on host - self.dutobj = self.dut.ports_info[self.port]["port"] - self.dutobj.enable_jumbo(framesize=ETHER_STANDARD_MTU) + self.sut_node = self.sut_node.ports_info[self.port]["port"] + self.sut_node.enable_jumbo(framesize=ETHER_STANDARD_MTU) self.vm_testpmd.start_testpmd( "Default", "--port-topology=loop --tx-offloads=0x8000" @@ -263,8 +263,8 @@ class TestVfJumboFrame(TestCase): packet can be forwarded correct. """ # should enable jumbo on host - self.dutobj = self.dut.ports_info[self.port]["port"] - self.dutobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU) + self.sut_node = self.sut_node.ports_info[self.port]["port"] + self.sut_node.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU) self.vm_testpmd.start_testpmd( "Default", @@ -286,8 +286,8 @@ class TestVfJumboFrame(TestCase): packet which the length bigger than MTU can not be forwarded. 
""" # should enable jumbo on host - self.dutobj = self.dut.ports_info[self.port]["port"] - self.dutobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU) + self.sut_node = self.sut_node.ports_info[self.port]["port"] + self.sut_node.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU) self.vm_testpmd.start_testpmd( "Default", diff --git a/tests/TestSuite_vf_kernel.py b/tests/TestSuite_vf_kernel.py index eba17dfe..95408817 100644 --- a/tests/TestSuite_vf_kernel.py +++ b/tests/TestSuite_vf_kernel.py @@ -14,9 +14,9 @@ import threading import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput from framework.qemu_kvm import QEMUKvm +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.utils import GREEN, RED @@ -29,19 +29,19 @@ class TestVfKernel(TestCase): """ Run at the start of each test suite. """ - self.dut.send_expect("service network-manager stop", "#", 60) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") - self.cores = self.dut.get_core_list("1S/4C/1T") + self.sut_node.send_expect("service network-manager stop", "#", 60) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") + self.cores = self.sut_node.get_core_list("1S/4C/1T") self.coremask = utils.create_mask(self.cores) - self.dmac = self.dut.get_mac_address(self.dut_ports[0]) - txport = self.tester.get_local_port(self.dut_ports[0]) - self.tester_intf = self.tester.get_interface(txport) - self.tester_mac = self.tester.get_mac(txport) + self.dmac = self.sut_node.get_mac_address(self.sut_ports[0]) + txport = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_intf = self.tg_node.get_interface(txport) + self.tg_mac = self.tg_node.get_mac(txport) - self.intf = self.dut.ports_info[self.dut_ports[0]]["intf"] - self.pci = 
self.dut.ports_info[self.dut_ports[0]]["pci"].split(":") + self.intf = self.sut_node.ports_info[self.sut_ports[0]]["intf"] + self.pci = self.sut_node.ports_info[self.sut_ports[0]]["pci"].split(":") self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: @@ -51,7 +51,7 @@ class TestVfKernel(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.src_logo = "12:34:56:78:90:10" self.setup_vm_env() @@ -61,7 +61,7 @@ class TestVfKernel(TestCase): Run before each test case. """ self.verify( - self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0), "vf link down" + self.check_pf_vf_link_status(self.vm0_sut, self.vm0_intf0), "vf link down" ) pass @@ -76,30 +76,30 @@ class TestVfKernel(TestCase): self.src_logo, ) pkts += pkt - self.tester.send_expect("rm -rf flow.pcap", "#", 10) - self.tester.scapy_append('wrpcap("flow.pcap", [%s])' % pkts) - self.tester.scapy_execute() + self.tg_node.send_expect("rm -rf flow.pcap", "#", 10) + self.tg_node.scapy_append('wrpcap("flow.pcap", [%s])' % pkts) + self.tg_node.scapy_execute() def setup_vm_env(self): """ 1pf -> 6vfs , 4vf->vm0, 2vf->vm1 """ - self.used_dut_port = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 6, driver="igb_uio") - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.used_sut_port = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 6, driver="igb_uio") + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver(self.vf_driver) time.sleep(1) - self.dut_testpmd = PmdOutput(self.dut) - self.dut_testpmd.start_testpmd( + self.sut_testpmd = PmdOutput(self.sut_node) + self.sut_testpmd.start_testpmd( "Default", "--rxq=4 --txq=4 --port-topology=chained" ) # dpdk-2208 # since there is no forward engine on 
DPDK PF to forward or drop packet in packet pool, # so finally the pool will be full, then no more packet will be # received by VF - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("start") time.sleep(5) vf0_prop_1 = {"opt_host": self.sriov_vfs_port[0].pci} @@ -107,18 +107,18 @@ class TestVfKernel(TestCase): vf0_prop_3 = {"opt_host": self.sriov_vfs_port[2].pci} vf0_prop_4 = {"opt_host": self.sriov_vfs_port[3].pci} - self.vm0 = QEMUKvm(self.dut, "vm0", "vf_kernel") + self.vm0 = QEMUKvm(self.sut_node, "vm0", "vf_kernel") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop_1) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop_2) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop_3) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop_4) try: - self.vm0_dut = self.vm0.start() - if self.vm0_dut is None: + self.vm0_sut = self.vm0.start() + if self.vm0_sut is None: raise Exception("Set up VM ENV failed") else: self.verify( - self.vm0_dut.ports_info[0]["intf"] != "N/A", "Not interface" + self.vm0_sut.ports_info[0]["intf"] != "N/A", "Not interface" ) except Exception as e: self.destroy_vm_env() @@ -126,71 +126,71 @@ class TestVfKernel(TestCase): vf1_prop_5 = {"opt_host": self.sriov_vfs_port[4].pci} vf1_prop_6 = {"opt_host": self.sriov_vfs_port[5].pci} - self.vm1 = QEMUKvm(self.dut, "vm1", "vf_kernel") + self.vm1 = QEMUKvm(self.sut_node, "vm1", "vf_kernel") self.vm1.set_vm_device(driver=self.vf_assign_method, **vf1_prop_5) self.vm1.set_vm_device(driver=self.vf_assign_method, **vf1_prop_6) try: - self.vm1_dut = self.vm1.start() - if self.vm1_dut is None: + self.vm1_sut = self.vm1.start() + if self.vm1_sut is None: raise Exception("Set up VM1 ENV failed!") else: # Intel® Ethernet 700 Series: PF not up ,vf will not get interface self.verify( - self.vm1_dut.ports_info[0]["intf"] != "N/A", "Not interface" + self.vm1_sut.ports_info[0]["intf"] != "N/A", "Not interface" ) except Exception as e: 
self.destroy_vm_env() raise Exception(e) - self.vm0_testpmd = PmdOutput(self.vm0_dut) - self.vm1_testpmd = PmdOutput(self.vm1_dut) + self.vm0_testpmd = PmdOutput(self.vm0_sut) + self.vm1_testpmd = PmdOutput(self.vm1_sut) - self.vm0_vf0_mac = self.vm0_dut.get_mac_address(0) - self.vm0_vf1_mac = self.vm0_dut.get_mac_address(1) - self.vm0_vf2_mac = self.vm0_dut.get_mac_address(2) - self.vm0_vf3_mac = self.vm0_dut.get_mac_address(3) + self.vm0_vf0_mac = self.vm0_sut.get_mac_address(0) + self.vm0_vf1_mac = self.vm0_sut.get_mac_address(1) + self.vm0_vf2_mac = self.vm0_sut.get_mac_address(2) + self.vm0_vf3_mac = self.vm0_sut.get_mac_address(3) - self.vm1_vf0_mac = self.vm1_dut.get_mac_address(0) - self.vm1_vf1_mac = self.vm1_dut.get_mac_address(1) + self.vm1_vf0_mac = self.vm1_sut.get_mac_address(0) + self.vm1_vf1_mac = self.vm1_sut.get_mac_address(1) - self.vm0_intf0 = self.vm0_dut.ports_info[0]["intf"] - self.vm0_intf1 = self.vm0_dut.ports_info[1]["intf"] + self.vm0_intf0 = self.vm0_sut.ports_info[0]["intf"] + self.vm0_intf1 = self.vm0_sut.ports_info[1]["intf"] - self.vm1_intf0 = self.vm1_dut.ports_info[0]["intf"] + self.vm1_intf0 = self.vm1_sut.ports_info[0]["intf"] - self.vm0_dut.restore_interfaces_linux() - self.vm1_dut.restore_interfaces_linux() + self.vm0_sut.restore_interfaces_linux() + self.vm1_sut.restore_interfaces_linux() # stop NetworkManager, this if for centos7 # you may change it when the os no support - self.vm0_dut.send_expect("systemctl stop NetworkManager", "# ", 60) - self.vm1_dut.send_expect("systemctl stop NetworkManager", "# ", 60) + self.vm0_sut.send_expect("systemctl stop NetworkManager", "# ", 60) + self.vm1_sut.send_expect("systemctl stop NetworkManager", "# ", 60) def destroy_vm_env(self): """ destroy vm environment """ if getattr(self, "vm0", None): - self.vm0_dut.kill_all() - self.vm0_dut_ports = None + self.vm0_sut.kill_all() + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() self.vm0 = None if getattr(self, "vm1", None): - 
self.vm1_dut.kill_all() - self.vm1_dut_ports = None + self.vm1_sut.kill_all() + self.vm1_sut_ports = None # destroy vm1 self.vm1.stop() self.vm1 = None - self.dut.virt_exit() + self.sut_node.virt_exit() - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] - self.used_dut_port = None + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] + self.used_sut_port = None def test_link(self): """ @@ -199,21 +199,21 @@ class TestVfKernel(TestCase): for i in range(5): # pf up + vf up -> vf up start_time = time.time() - self.vm0_dut.send_expect("ifconfig %s up" % self.vm0_intf0, "#") - out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#") + self.vm0_sut.send_expect("ifconfig %s up" % self.vm0_intf0, "#") + out = self.vm0_sut.send_expect("ethtool %s" % self.vm0_intf0, "#") while "Link detected: yes" not in out: end_time = time.time() if end_time - start_time >= 3: break else: - out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#") + out = self.vm0_sut.send_expect("ethtool %s" % self.vm0_intf0, "#") time.sleep(1) self.verify("Link detected: yes" in out, "Wrong link status") time.sleep(3) # pf up + vf down -> vf down - self.vm0_dut.send_expect("ifconfig %s down" % self.vm0_intf0, "#") - out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#") + self.vm0_sut.send_expect("ifconfig %s down" % self.vm0_intf0, "#") + out = self.vm0_sut.send_expect("ethtool %s" % self.vm0_intf0, "#") self.verify("Link detected: no" in out, "Wrong link status") time.sleep(3) @@ -241,120 +241,120 @@ class TestVfKernel(TestCase): pf_ip = "5.5.5.%d" % (random_ip + 2) # down-up link - for port_info in self.vm0_dut.ports_info: + for port_info in self.vm0_sut.ports_info: vm0_intf = port_info["intf"] self.verify( - 
self.check_pf_vf_link_status(self.vm0_dut, vm0_intf), + self.check_pf_vf_link_status(self.vm0_sut, vm0_intf), "VM0_vf: %s link down" % vm0_intf, ) - self.vm0_dut.send_expect( + self.vm0_sut.send_expect( "ifconfig %s %s netmask 255.255.255.0" % (self.vm0_intf0, vm0_ip0), "#" ) - self.vm0_dut.send_expect( + self.vm0_sut.send_expect( "ifconfig %s %s netmask 255.255.255.0" % (self.vm0_intf1, vm0_ip1), "#" ) - self.tester.send_expect( - "ifconfig %s %s netmask 255.255.255.0" % (self.tester_intf, pf_ip), "#" + self.tg_node.send_expect( + "ifconfig %s %s netmask 255.255.255.0" % (self.tg_intf, pf_ip), "#" ) # pf ping vm0_vf0 self.verify( - self.ping4(self.tester, self.tester_intf, vm0_ip0), - "%s ping %s failed" % (self.tester_intf, vm0_ip0), + self.ping4(self.tg_node, self.tg_intf, vm0_ip0), + "%s ping %s failed" % (self.tg_intf, vm0_ip0), ) # vm0_vf0 ping pf self.verify( - self.ping4(self.vm0_dut, self.vm0_intf0, pf_ip), + self.ping4(self.vm0_sut, self.vm0_intf0, pf_ip), "%s ping %s failed" % (self.vm0_intf0, pf_ip), ) # pf ping vm0_vf1 self.verify( - self.ping4(self.tester, self.tester_intf, vm0_ip1), - "%s ping %s failed" % (self.tester_intf, vm0_ip1), + self.ping4(self.tg_node, self.tg_intf, vm0_ip1), + "%s ping %s failed" % (self.tg_intf, vm0_ip1), ) # vm0_pf1 ping pf self.verify( - self.ping4(self.vm0_dut, self.vm0_intf1, pf_ip), + self.ping4(self.vm0_sut, self.vm0_intf1, pf_ip), "%s ping %s failed" % (self.vm0_intf1, pf_ip), ) # clear ip - self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf0, "#") - self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf1, "#") - self.tester.send_expect("ifconfig %s 0.0.0.0" % self.tester_intf, "#") + self.vm0_sut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf0, "#") + self.vm0_sut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf1, "#") + self.tg_node.send_expect("ifconfig %s 0.0.0.0" % self.tg_intf, "#") def test_reset(self): """ verify reset the vf1 impact on VF0 """ self.verify( - 
self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0), + self.check_pf_vf_link_status(self.vm0_sut, self.vm0_intf0), "VM0_VF0 link up failed", ) self.verify( - self.check_pf_vf_link_status(self.vm1_dut, self.vm1_intf0), + self.check_pf_vf_link_status(self.vm1_sut, self.vm1_intf0), "VM1_VF0 link up failed", ) # Link down VF1 in VM1 and check no impact on VF0 status - self.vm1_dut.send_expect("ifconfig %s down" % self.vm1_intf0, "#") - out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#") + self.vm1_sut.send_expect("ifconfig %s down" % self.vm1_intf0, "#") + out = self.vm0_sut.send_expect("ethtool %s" % self.vm0_intf0, "#") self.verify("Link detected: yes" in out, "Wrong link status") # Unload VF1 kernel driver and expect no problem for VF0 if self.kdriver == "i40e": - self.vm1_dut.send_expect("rmmod iavf", "#") + self.vm1_sut.send_expect("rmmod iavf", "#") else: - self.vm1_dut.send_expect("rmmod %svf" % self.kdriver, "#") - out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#") + self.vm1_sut.send_expect("rmmod %svf" % self.kdriver, "#") + out = self.vm0_sut.send_expect("ethtool %s" % self.vm0_intf0, "#") self.verify("Link detected: yes" in out, "Wrong link status") - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac), + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, vm0_vf0_mac), "Unload VF1 kernel driver impact VF0", ) self.verify( - self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0), + self.check_pf_vf_link_status(self.vm0_sut, self.vm0_intf0), "vm0_vf0 link down", ) time.sleep(10) - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac), + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, vm0_vf0_mac), 
"reset PF testpmd impact VF RX failure", ) if self.kdriver == "i40e": - self.vm1_dut.send_expect("modprobe iavf", "#") + self.vm1_sut.send_expect("modprobe iavf", "#") else: - self.vm1_dut.send_expect("modprobe %svf" % self.kdriver, "#") - out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#") + self.vm1_sut.send_expect("modprobe %svf" % self.kdriver, "#") + out = self.vm0_sut.send_expect("ethtool %s" % self.vm0_intf0, "#") self.verify("Link detected: yes" in out, "Wrong link status") - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac), + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, vm0_vf0_mac), "load VF1 kernel driver impact VF0", ) if self.kdriver == "i40e": - self.vm1_dut.send_expect("rmmod iavf", "#") + self.vm1_sut.send_expect("rmmod iavf", "#") else: - self.vm1_dut.send_expect("rmmod %svf" % self.kdriver, "#") - out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#") + self.vm1_sut.send_expect("rmmod %svf" % self.kdriver, "#") + out = self.vm0_sut.send_expect("ethtool %s" % self.vm0_intf0, "#") self.verify("Link detected: yes" in out, "Wrong link status") - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac), + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, vm0_vf0_mac), "Reset VF1 kernel driver impact VF0", ) if self.kdriver == "i40e": - self.vm1_dut.send_expect("modprobe iavf", "#") + self.vm1_sut.send_expect("modprobe iavf", "#") else: - self.vm1_dut.send_expect("modprobe %svf" % self.kdriver, "#") + self.vm1_sut.send_expect("modprobe %svf" % self.kdriver, "#") def test_address(self): """ @@ -364,103 +364,103 @@ class TestVfKernel(TestCase): random_ip = random.randint(2, 249) vm0_ip0 = "5.5.5.%d" % 
random_ip pf_ip = "5.5.5.%d" % (random_ip + 2) - self.vm0_dut.send_expect( + self.vm0_sut.send_expect( "ifconfig %s %s netmask 255.255.255.0" % (self.vm0_intf0, vm0_ip0), "#" ) - self.tester.send_expect( - "ifconfig %s %s netmask 255.255.255.0" % (self.tester_intf, pf_ip), "#" + self.tg_node.send_expect( + "ifconfig %s %s netmask 255.255.255.0" % (self.tg_intf, pf_ip), "#" ) # pf ping vm0_vf0 self.verify( - self.ping4(self.tester, self.tester_intf, vm0_ip0), - "%s ping %s failed" % (self.tester_intf, vm0_ip0), + self.ping4(self.tg_node, self.tg_intf, vm0_ip0), + "%s ping %s failed" % (self.tg_intf, vm0_ip0), ) # vm0_vf0 ping pf self.verify( - self.ping4(self.vm0_dut, self.vm0_intf0, pf_ip), + self.ping4(self.vm0_sut, self.vm0_intf0, pf_ip), "%s ping %s failed" % (self.vm0_intf0, pf_ip), ) # clear ip - self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf0, "#") - self.tester.send_expect("ifconfig %s 0.0.0.0" % self.tester_intf, "#") + self.vm0_sut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf0, "#") + self.tg_node.send_expect("ifconfig %s 0.0.0.0" % self.tg_intf, "#") # ipv6 test: add_ipv6 = "efdd::9fc8:6a6d:c232:f1c0" - self.vm0_dut.send_expect("ifconfig %s add %s" % (self.vm0_intf0, add_ipv6), "#") - out = self.vm0_dut.send_expect("ifconfig %s " % self.vm0_intf0, "#", 10) + self.vm0_sut.send_expect("ifconfig %s add %s" % (self.vm0_intf0, add_ipv6), "#") + out = self.vm0_sut.send_expect("ifconfig %s " % self.vm0_intf0, "#", 10) self.verify(add_ipv6 in out, "Failed to add ipv6 address") - self.vm0_dut.send_expect("ifconfig %s del %s" % (self.vm0_intf0, add_ipv6), "#") - out = self.vm0_dut.send_expect("ifconfig %s " % self.vm0_intf0, "#", 10) + self.vm0_sut.send_expect("ifconfig %s del %s" % (self.vm0_intf0, add_ipv6), "#") + out = self.vm0_sut.send_expect("ifconfig %s " % self.vm0_intf0, "#", 10) self.verify(add_ipv6 not in out, "Failed to del ipv6 address") # mac test: modify_mac = "aa:bb:cc:dd:ee:ff" - self.vm0_dut.send_expect( + 
self.vm0_sut.send_expect( "ifconfig %s hw ether %s" % (self.vm0_intf0, modify_mac), "#" ) - out = self.vm0_dut.send_expect("ifconfig %s " % self.vm0_intf0, "#", 10) + out = self.vm0_sut.send_expect("ifconfig %s " % self.vm0_intf0, "#", 10) self.verify(modify_mac in out, "Failed to add mac address") time.sleep(5) self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, modify_mac), + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, modify_mac), "modify mac address can't received packet", ) def verify_vm_tcpdump( - self, vm_dut, intf, mac, pkt_lens=64, num=1, vlan_id="", param="" + self, vm_sut, intf, mac, pkt_lens=64, num=1, vlan_id="", param="" ): - vm_dut.send_expect( - "tcpdump -i %s %s -e ether src %s" % (intf, param, self.tester_mac), + vm_sut.send_expect( + "tcpdump -i %s %s -e ether src %s" % (intf, param, self.tg_mac), "tcpdump", 10, ) time.sleep(2) self.send_packet(mac, pkt_lens, num, vlan_id) - out = vm_dut.get_session_output(timeout=10) - vm_dut.send_expect("^C", "#", 10) - if self.tester_mac in out: + out = vm_sut.get_session_output(timeout=10) + vm_sut.send_expect("^C", "#", 10) + if self.tg_mac in out: return True else: return False def send_packet(self, mac, pkt_lens=64, num=1, vlan_id=""): if vlan_id == "": - pkt = Packet(pkt_type="TCP", pkt_len=pkt_lens) - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.send_pkt(self.tester, tx_port=self.tester_intf, count=num) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="TCP", pkt_len=pkt_lens) + scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf, count=num) else: - pkt = Packet(pkt_type="VLAN_UDP", pkt_len=pkt_lens) - pkt.config_layer("ether", {"dst": mac, "src": self.tester_mac}) - pkt.config_layer("vlan", {"vlan": vlan_id}) - pkt.send_pkt(self.tester, tx_port=self.tester_intf, count=num) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP", pkt_len=pkt_lens) + 
scapy_pkt_builder.config_layer("ether", {"dst": mac, "src": self.tg_mac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": vlan_id}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf, count=num) def test_vlan(self): """ verify add/delete vlan """ vlan_ids = random.randint(1, 4095) - self.dut_testpmd.execute_cmd("vlan set filter on 0") - self.dut_testpmd.execute_cmd("vlan set strip on 0") - self.vm0_dut.send_expect("ifconfig %s up" % self.vm0_intf1, "#") - vm0_vf1_mac = self.vm0_dut.ports_info[1]["port"].get_mac_addr() + self.sut_testpmd.execute_cmd("vlan set filter on 0") + self.sut_testpmd.execute_cmd("vlan set strip on 0") + self.vm0_sut.send_expect("ifconfig %s up" % self.vm0_intf1, "#") + vm0_vf1_mac = self.vm0_sut.ports_info[1]["port"].get_mac_addr() - self.vm0_dut.send_expect("modprobe 8021q", "#") - out = self.vm0_dut.send_expect("lsmod |grep 8021q", "#") + self.vm0_sut.send_expect("modprobe 8021q", "#") + out = self.vm0_sut.send_expect("lsmod |grep 8021q", "#") self.verify("8021q" in out, "modprobe 8021q failure") # Add random vlan id(0~4095) on kernel VF0 - self.vm0_dut.send_expect("vconfig add %s %s" % (self.vm0_intf1, vlan_ids), "#") - out = self.vm0_dut.send_expect("ls /proc/net/vlan/ ", "#") + self.vm0_sut.send_expect("vconfig add %s %s" % (self.vm0_intf1, vlan_ids), "#") + out = self.vm0_sut.send_expect("ls /proc/net/vlan/ ", "#") self.verify("%s.%s" % (self.vm0_intf1, vlan_ids) in out, "take vlan id failure") - # Send packet from tester to VF MAC with not-matching vlan id, check + # Send packet from TG to VF MAC with not-matching vlan id, check # the packet can't be received at the vlan device # Intel® Ethernet 700 Series nic need add -p parameter to disable promisc mode wrong_vlan = vlan_ids % 4095 + 1 self.verify( self.verify_vm_tcpdump( - self.vm0_dut, + self.vm0_sut, self.vm0_intf1, vm0_vf1_mac, vlan_id="%d" % wrong_vlan, @@ -469,10 +469,10 @@ class TestVfKernel(TestCase): == False, "received wrong vlan packet", ) - # Send packet 
from tester to VF MAC with matching vlan id, check the packet can be received at the vlan device. - # check_result = self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, self.vm0_vf0_mac, vlan_id='%d' %vlan_ids) + # Send packet from TG to VF MAC with matching vlan id, check the packet can be received at the vlan device. + # check_result = self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, self.vm0_vf0_mac, vlan_id='%d' %vlan_ids) check_result = self.verify_vm_tcpdump( - self.vm0_dut, + self.vm0_sut, self.vm0_intf1, vm0_vf1_mac, vlan_id="%d" % vlan_ids, @@ -481,14 +481,14 @@ class TestVfKernel(TestCase): self.verify(check_result, "can't received vlan_id=%d packet" % vlan_ids) # Delete configured vlan device - self.vm0_dut.send_expect("vconfig rem %s.%s" % (self.vm0_intf1, vlan_ids), "#") - out = self.vm0_dut.send_expect("ls /proc/net/vlan/ ", "#") + self.vm0_sut.send_expect("vconfig rem %s.%s" % (self.vm0_intf1, vlan_ids), "#") + out = self.vm0_sut.send_expect("ls /proc/net/vlan/ ", "#") self.verify("%s.%s" % (self.vm0_intf1, vlan_ids) not in out, "vlan error") # behavior is different between 82599 and 700 Series ,because of kernel # driver self.verify( self.verify_vm_tcpdump( - self.vm0_dut, + self.vm0_sut, self.vm0_intf1, vm0_vf1_mac, vlan_id="%d" % vlan_ids, @@ -497,29 +497,29 @@ class TestVfKernel(TestCase): == False, "delete vlan error", ) - self.dut_testpmd.execute_cmd("vlan set filter off 0") + self.sut_testpmd.execute_cmd("vlan set filter off 0") def test_packet_statistic(self): """ verify packet statistic """ time.sleep(1) - self.vm0_dut.send_expect("sysctl net.ipv6.conf.all.disable_ipv6=1", "#") - self.vm0_dut.send_expect("sysctl net.ipv6.conf.default.disable_ipv6=1", "#") + self.vm0_sut.send_expect("sysctl net.ipv6.conf.all.disable_ipv6=1", "#") + self.vm0_sut.send_expect("sysctl net.ipv6.conf.default.disable_ipv6=1", "#") time.sleep(10) - out = self.vm0_dut.send_expect("ethtool -S %s" % self.vm0_intf0, "#") + out = self.vm0_sut.send_expect("ethtool -S 
%s" % self.vm0_intf0, "#") rx_packets_before = re.findall("\s*rx.*\d+.*packets:\s*(\d*)", out) nb_rx_pkts_before = 0 for i in range(len(rx_packets_before)): nb_rx_pkts_before += int(rx_packets_before[i]) - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac, num=10), + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, vm0_vf0_mac, num=10), "VM reveive packet failed", ) - out = self.vm0_dut.send_expect("ethtool -S %s" % self.vm0_intf0, "#") + out = self.vm0_sut.send_expect("ethtool -S %s" % self.vm0_intf0, "#") rx_packets_after = re.findall("\s*rx.*\d+.*packets:\s*(\d*)", out) nb_rx_pkts_after = 0 for i in range(len(rx_packets_after)): @@ -554,84 +554,84 @@ class TestVfKernel(TestCase): HW limitation on 82599, need add '--max-pkt-len=' on testpmd to set mtu value, all the VFs and PF share same MTU, the largest one take effect. """ - vm0_intf0 = self.vm0_dut.ports_info[0]["intf"] - vm0_intf1 = self.vm0_dut.ports_info[1]["intf"] - self.vm0_dut.send_expect("ifconfig %s up" % self.vm0_intf0, "#") - out = self.vm0_dut.send_expect("ifconfig %s" % self.vm0_intf0, "#") + vm0_intf0 = self.vm0_sut.ports_info[0]["intf"] + vm0_intf1 = self.vm0_sut.ports_info[1]["intf"] + self.vm0_sut.send_expect("ifconfig %s up" % self.vm0_intf0, "#") + out = self.vm0_sut.send_expect("ifconfig %s" % self.vm0_intf0, "#") result = re.search("mtu\W1500", out, flags=re.I) self.verify(result is not None, "modify MTU failed") - self.tester.send_expect("ifconfig %s mtu 3000" % self.tester_intf, "#") + self.tg_node.send_expect("ifconfig %s mtu 3000" % self.tg_intf, "#") - self.dut_testpmd.execute_cmd("stop") - self.dut_testpmd.execute_cmd("set promisc all off") - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("stop") + 
self.sut_testpmd.execute_cmd("set promisc all off") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") # Send one packet with length as 2000 with DPDK PF MAC as DEST MAC, # check that DPDK PF can't receive packet self.send_packet(self.dmac, pkt_lens=2000) - out = self.dut.get_session_output(timeout=10) + out = self.sut_node.get_session_output(timeout=10) self.verify(self.dmac.upper() not in out, "PF receive error packet") # send one packet with length as 2000 with kernel VF MAC as DEST MAC, # check that Kernel VF can't receive packet - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( self.verify_vm_tcpdump( - self.vm0_dut, self.vm0_intf0, vm0_vf0_mac, pkt_lens=2000 + self.vm0_sut, self.vm0_intf0, vm0_vf0_mac, pkt_lens=2000 ) == False, "kernel VF receive error packet", ) # Change DPDK PF mtu as 3000,check no confusion/crash on kernel VF - self.dut_testpmd.execute_cmd("stop") - self.dut_testpmd.execute_cmd("port stop all") - self.dut_testpmd.execute_cmd("port config mtu 0 3000") - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("stop") + self.sut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port config mtu 0 3000") + self.sut_testpmd.execute_cmd("port start all") - self.dut_testpmd.execute_cmd("set promisc all off") - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("set promisc all off") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") # sleep 5s to wait vf up , because of pf down-up self.verify( - self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0), + self.check_pf_vf_link_status(self.vm0_sut, self.vm0_intf0), "VM0_VF0 link 
down", ) # clear output - self.dut.get_session_output(timeout=10) + self.sut_node.get_session_output(timeout=10) # send one packet with length as 2000 with DPDK PF MAC as DEST MAC , # check that DPDK PF can receive packet self.send_packet(self.dmac, pkt_lens=2000) - out = self.dut.get_session_output(timeout=10) + out = self.sut_node.get_session_output(timeout=10) self.verify(self.dmac.upper() in out, "PF can't receive packet") # Change kernel VF mtu as 3000,check no confusion/crash on DPDK PF - self.vm0_dut.send_expect("ifconfig %s mtu 3000" % self.vm0_intf0, "#") + self.vm0_sut.send_expect("ifconfig %s mtu 3000" % self.vm0_intf0, "#") # send one packet with length as 2000 with kernel VF MAC as DEST MAC, # check Kernel VF can receive packet - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( self.verify_vm_tcpdump( - self.vm0_dut, self.vm0_intf0, vm0_vf0_mac, pkt_lens=2000 + self.vm0_sut, self.vm0_intf0, vm0_vf0_mac, pkt_lens=2000 ), "VF can't receive packet", ) - self.dut_testpmd.execute_cmd("stop") - self.dut_testpmd.execute_cmd("port stop all") - self.dut_testpmd.execute_cmd("port config mtu 0 1500") - self.dut_testpmd.execute_cmd("port start all") + self.sut_testpmd.execute_cmd("stop") + self.sut_testpmd.execute_cmd("port stop all") + self.sut_testpmd.execute_cmd("port config mtu 0 1500") + self.sut_testpmd.execute_cmd("port start all") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("start") - self.vm0_dut.send_expect("ifconfig %s mtu 1500" % self.vm0_intf0, "#", 10) + self.vm0_sut.send_expect("ifconfig %s mtu 1500" % self.vm0_intf0, "#", 10) def test_promisc_mode(self): """ @@ -643,87 +643,87 @@ class TestVfKernel(TestCase): wrong_mac = "01:02:03:04:05:06" # Set up kernel VF tcpdump with -p parameter, which means disable promisc # Start DPDK PF, enable promisc mode, set rxonly forwarding - self.dut_testpmd.execute_cmd("stop") - 
self.dut_testpmd.execute_cmd("set promisc all on") - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") + self.sut_testpmd.execute_cmd("stop") + self.sut_testpmd.execute_cmd("set promisc all on") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") self.verify( - self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0), + self.check_pf_vf_link_status(self.vm0_sut, self.vm0_intf0), "VM0_VF0 link down", ) - self.dut.get_session_output() + self.sut_node.get_session_output() - # Send packet from tester to VF with correct DST MAC, check the packet + # Send packet from TG to VF with correct DST MAC, check the packet # can be received by kernel VF - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac), + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, vm0_vf0_mac), "VM reveive packet failed", ) - # Send packet from tester to PF with correct DST MAC, check the packet + # Send packet from TG to PF with correct DST MAC, check the packet # can be received by DPDK PF self.send_packet(self.dmac) - out = self.dut.get_session_output() - self.verify(self.tester_mac.upper() in out, "PF reveive packet failed") + out = self.sut_node.get_session_output() + self.verify(self.tg_mac.upper() in out, "PF reveive packet failed") - # Send packet from tester with random DST MAC, check the packet can be + # Send packet from TG with random DST MAC, check the packet can be # received by DPDK PF and kernel VF self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, wrong_mac), + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, wrong_mac), "VM reveive misc packet failed", ) self.send_packet(wrong_mac) - out = self.dut.get_session_output() - 
self.verify(self.tester_mac.upper() in out, "PF reveive misc packet failed") + out = self.sut_node.get_session_output() + self.verify(self.tg_mac.upper() in out, "PF reveive misc packet failed") - # Send packet from tester to VF with correct DST MAC, check the packet + # Send packet from TG to VF with correct DST MAC, check the packet # can be received by kernel VF - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac), + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, vm0_vf0_mac), "VM reveive packet failed", ) - # Send packet from tester to PF with correct DST MAC, check the packet + # Send packet from TG to PF with correct DST MAC, check the packet # can be received by DPDK PF self.send_packet(self.dmac) - out = self.dut.get_session_output() - self.verify(self.tester_mac.upper() in out, "PF reveive packet failed") + out = self.sut_node.get_session_output() + self.verify(self.tg_mac.upper() in out, "PF reveive packet failed") # Disable DPDK PF promisc mode - self.dut_testpmd.execute_cmd("stop") - self.dut_testpmd.execute_cmd("set promisc all off") - self.dut_testpmd.execute_cmd("set fwd rxonly") - self.dut_testpmd.execute_cmd("set verbose 1") - self.dut_testpmd.execute_cmd("start") - self.dut.get_session_output() + self.sut_testpmd.execute_cmd("stop") + self.sut_testpmd.execute_cmd("set promisc all off") + self.sut_testpmd.execute_cmd("set fwd rxonly") + self.sut_testpmd.execute_cmd("set verbose 1") + self.sut_testpmd.execute_cmd("start") + self.sut_node.get_session_output() # Set up kernel VF tcpdump with -p parameter, which means disable promisc mode - # Send packet from tester with random DST MAC, check the packet can't + # Send packet from TG with random DST MAC, check the packet can't # be received by DPDK PF and kernel VF self.verify( - self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, 
wrong_mac, param="-p") + self.verify_vm_tcpdump(self.vm0_sut, self.vm0_intf0, wrong_mac, param="-p") == False, "VM should not reveive misc packet", ) self.send_packet(wrong_mac) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.verify(wrong_mac not in out, "PF should not receive misc packet") - # Send packet from tester to VF with correct DST MAC, check the packet + # Send packet from TG to VF with correct DST MAC, check the packet # can be received by kernel VF - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.verify( self.verify_vm_tcpdump( - self.vm0_dut, self.vm0_intf0, vm0_vf0_mac, param="-p" + self.vm0_sut, self.vm0_intf0, vm0_vf0_mac, param="-p" ), "VM reveive packet failed", ) - # Send packet from tester to PF with correct DST MAC, check the packet + # Send packet from TG to PF with correct DST MAC, check the packet # can be received by DPDK PF self.send_packet(self.dmac) - out = self.dut.get_session_output() - self.verify(self.tester_mac.upper() in out, "PF reveive packet failed") + out = self.sut_node.get_session_output() + self.verify(self.tg_mac.upper() in out, "PF reveive packet failed") def test_rss(self): """ @@ -737,35 +737,35 @@ class TestVfKernel(TestCase): # Verify kernel VF RSS using ethtool -"l" (lower case L) that the # default RSS setting is equal to the number of CPUs in the system and # that the maximum number of RSS queues displayed is correct for the - # DUT + # NIC ports self.verify( - self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0), + self.check_pf_vf_link_status(self.vm0_sut, self.vm0_intf0), "VM0_VF0 link down", ) - cpus = self.vm0_dut.send_expect( + cpus = self.vm0_sut.send_expect( "cat /proc/cpuinfo| grep 'processor'| wc -l", "#" ) - out = self.vm0_dut.send_expect("ethtool -l %s" % self.vm0_intf0, "#", 10) + out = self.vm0_sut.send_expect("ethtool -l %s" % self.vm0_intf0, "#", 10) combined = 
re.findall("Combined:\s*(\d*)", out) self.verify(cpus == combined[0], "the queues count error") # Run "ethtool -S | grep rx_bytes | column" to see the current # queue count and verify that it is correct to step 1 - out = self.vm0_dut.send_expect( + out = self.vm0_sut.send_expect( "ethtool -S %s |grep rx-.*bytes" % self.vm0_intf0, "#" ) rx_bytes_before = re.findall("rx-.*bytes:\s*(\d*)", out) self.verify(len(rx_bytes_before) == int(combined[0]), "the queues count error") - # Send multi-threaded traffics to the DUT with a number of threads + # Send multi-threaded traffics to the SUT with a number of threads # Check kernel VF each queue can receive packets - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() for i in range(5): mythread = threading.Thread(target=self.send_packet(vm0_vf0_mac)) mythread.start() - out = self.vm0_dut.send_expect( + out = self.vm0_sut.send_expect( "ethtool -S %s |grep rx-*bytes" % self.vm0_intf0, "#" ) rx_bytes_after = re.findall("rx-*.bytes:\s*(\d*)", out) @@ -778,12 +778,12 @@ class TestVfKernel(TestCase): """ Check DPDK VF0 and kernel VF1 don't impact each other and no performance drop """ - self.vm0_dut.send_expect("ifconfig %s up " % self.vm0_intf0, "#") - self.vm0_dut.send_expect("ifconfig %s up " % self.vm0_intf1, "#") - self.vm0_dut.ports_info[1]["port"].bind_driver("vfio-pci") + self.vm0_sut.send_expect("ifconfig %s up " % self.vm0_intf0, "#") + self.vm0_sut.send_expect("ifconfig %s up " % self.vm0_intf1, "#") + self.vm0_sut.ports_info[1]["port"].bind_driver("vfio-pci") # because of alt_session is false, host cmd need to execute before testpmd start - vm0_vf0_mac = self.vm0_dut.ports_info[0]["port"].get_mac_addr() + vm0_vf0_mac = self.vm0_sut.ports_info[0]["port"].get_mac_addr() self.vm0_testpmd.start_testpmd("Default") self.vm0_testpmd.execute_cmd("set promisc all on") @@ -796,7 +796,7 @@ class TestVfKernel(TestCase): macs = [vm0_vf0_mac, vm0_vf1_mac] 
self.generate_pcap_pkt(macs) - vm0_newvmsession = self.vm0_dut.new_session() + vm0_newvmsession = self.vm0_sut.new_session() date_old = datetime.datetime.now() date_new = date_old + datetime.timedelta(minutes=0.5) while 1: @@ -807,7 +807,7 @@ class TestVfKernel(TestCase): ) self.send_packets() - out = self.vm0_dut.get_session_output(timeout=20) + out = self.vm0_sut.get_session_output(timeout=20) self.verify( self.src_logo in out, "VM PF Confiscated to the specified package" ) @@ -825,10 +825,10 @@ class TestVfKernel(TestCase): time.sleep(3) def send_packets(self): - self.tester.scapy_foreground() - self.tester.scapy_append("pkts=rdpcap('flow.pcap')") - self.tester.scapy_append("sendp(pkts, iface='%s')" % self.tester_intf) - self.tester.scapy_execute() + self.tg_node.scapy_foreground() + self.tg_node.scapy_append("pkts=rdpcap('flow.pcap')") + self.tg_node.scapy_append("sendp(pkts, iface='%s')" % self.tg_intf) + self.tg_node.scapy_execute() def reboot_vm1(self): """ @@ -837,17 +837,17 @@ class TestVfKernel(TestCase): self.vm1.stop() vf1_prop_5 = {"opt_host": self.sriov_vfs_port[4].pci} vf1_prop_6 = {"opt_host": self.sriov_vfs_port[5].pci} - self.vm1 = QEMUKvm(self.dut, "vm1", "vf_kernel") + self.vm1 = QEMUKvm(self.sut_node, "vm1", "vf_kernel") self.vm1.set_vm_device(driver=self.vf_assign_method, **vf1_prop_5) self.vm1.set_vm_device(driver=self.vf_assign_method, **vf1_prop_6) try: - self.vm1_dut = self.vm1.start() - if self.vm1_dut is None: + self.vm1_sut = self.vm1.start() + if self.vm1_sut is None: raise Exception("Set up VM1 ENV failed!") else: self.verify( - self.vm1_dut.ports_info[0]["intf"] != "N/A", "Not interface" + self.vm1_sut.ports_info[0]["intf"] != "N/A", "Not interface" ) except Exception as e: self.destroy_vm_env() @@ -860,30 +860,30 @@ class TestVfKernel(TestCase): Host one DPDK PF and create 6 VFs, pass through VF0, VF1, VF2 and VF3 to VM0, pass through VF4, VF5 to VM1, power on VM0 and VM1. Load host DPDK driver, VM DPDK driver and kernel driver. 
""" - for port_info in self.vm0_dut.ports_info: + for port_info in self.vm0_sut.ports_info: vm0_intf = port_info["intf"] self.verify( - self.check_pf_vf_link_status(self.vm0_dut, vm0_intf), + self.check_pf_vf_link_status(self.vm0_sut, vm0_intf), "VM0_vf: %s link down" % vm0_intf, ) - for port_info in self.vm1_dut.ports_info: + for port_info in self.vm1_sut.ports_info: vm1_intf = port_info["intf"] self.verify( - self.check_pf_vf_link_status(self.vm1_dut, vm1_intf), + self.check_pf_vf_link_status(self.vm1_sut, vm1_intf), "VM1_vf: %s link down" % vm1_intf, ) # Bind kernel VF0, VF1 to vfio-pci in VM0, bind kernel VF4 to vfio-pci in # VM1 - self.vm0_dut.ports_info[0]["port"].bind_driver("vfio-pci") - self.vm0_dut.ports_info[1]["port"].bind_driver("vfio-pci") - self.vm1_dut.ports_info[0]["port"].bind_driver("vfio-pci") + self.vm0_sut.ports_info[0]["port"].bind_driver("vfio-pci") + self.vm0_sut.ports_info[1]["port"].bind_driver("vfio-pci") + self.vm1_sut.ports_info[0]["port"].bind_driver("vfio-pci") # because of alt_session is false, host cmd need to execute before testpmd start - vm0_vf2_mac = self.vm0_dut.ports_info[2]["port"].get_mac_addr() - vm0_vf3_mac = self.vm0_dut.ports_info[3]["port"].get_mac_addr() - vm1_vf1_mac = self.vm1_dut.ports_info[1]["port"].get_mac_addr() + vm0_vf2_mac = self.vm0_sut.ports_info[2]["port"].get_mac_addr() + vm0_vf3_mac = self.vm0_sut.ports_info[3]["port"].get_mac_addr() + vm1_vf1_mac = self.vm1_sut.ports_info[1]["port"].get_mac_addr() # Start DPDK VF0, VF1 in VM0 and VF4 in VM1, enable promisc mode self.vm0_testpmd.start_testpmd("Default") @@ -901,8 +901,8 @@ class TestVfKernel(TestCase): vm0_vf0_mac = self.vm0_testpmd.get_port_mac(0) vm0_vf1_mac = self.vm0_testpmd.get_port_mac(1) vm1_vf0_mac = self.vm1_testpmd.get_port_mac(0) - pf0_mac = self.dut_testpmd.get_port_mac(0) - pf1_mac = self.dut_testpmd.get_port_mac(1) + pf0_mac = self.sut_testpmd.get_port_mac(0) + pf1_mac = self.sut_testpmd.get_port_mac(1) macs = [ vm0_vf0_mac, @@ -918,34 
+918,34 @@ class TestVfKernel(TestCase): self.send_packets() - vm0_vf2_newvmsession = self.vm0_dut.new_session() - vm0_vf3_newvmsession = self.vm0_dut.new_session() - vm1_newvmsession = self.vm1_dut.new_session() + vm0_vf2_newvmsession = self.vm0_sut.new_session() + vm0_vf3_newvmsession = self.vm0_sut.new_session() + vm1_newvmsession = self.vm1_sut.new_session() # Set up kernel VF2, VF3 in VM0 and VF5 in VM1 tcpdump without -p # parameter on promisc mode vm0_vf2_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[2]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[2]["intf"], self.src_logo), "tcpdump", 10, ) vm0_vf3_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[3]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[3]["intf"], self.src_logo), "tcpdump", 10, ) vm1_newvmsession.send_expect( "tcpdump -i %s -e -p ether src %s" - % (self.vm0_dut.ports_info[1]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[1]["intf"], self.src_logo), "tcpdump", 10, ) self.send_packets() - out = self.vm0_dut.get_session_output() + out = self.vm0_sut.get_session_output() self.verify(self.src_logo in out, "VM0 PF Confiscated to the specified package") vm0_vf2_out = vm0_vf2_newvmsession.send_expect("^C", "#") @@ -962,7 +962,7 @@ class TestVfKernel(TestCase): "vm0 vf3 Confiscated to the specified package", ) - out = self.vm1_dut.get_session_output() + out = self.vm1_sut.get_session_output() self.verify(self.src_logo in out, "VM1 PF Confiscated to the specified package") vm1_vf1_out = vm1_newvmsession.send_expect("^C", "#") @@ -982,26 +982,26 @@ class TestVfKernel(TestCase): vm0_vf2_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[2]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[2]["intf"], self.src_logo), "tcpdump", 10, ) vm0_vf3_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[3]["intf"], self.src_logo), + % 
(self.vm0_sut.ports_info[3]["intf"], self.src_logo), "tcpdump", 10, ) vm1_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[1]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[1]["intf"], self.src_logo), "tcpdump", 10, ) self.send_packets() - out = self.vm0_dut.get_session_output() + out = self.vm0_sut.get_session_output() self.verify(self.src_logo in out, "link down impact VM0 PF receive package") vm0_vf2_out = vm0_vf2_newvmsession.send_expect("^C", "#") @@ -1016,7 +1016,7 @@ class TestVfKernel(TestCase): vm0_vf3_out_rx_packet[0] == "1", "link down impact vm0 vf3 receive package" ) - out = self.vm1_dut.get_session_output() + out = self.vm1_sut.get_session_output() self.verify(self.src_logo in out, "link down impact VM1 PF receive package") vm1_vf1_out = vm1_newvmsession.send_expect("^C", "#") @@ -1027,25 +1027,25 @@ class TestVfKernel(TestCase): # Link down kernel VF2 and expect no impact on other VFs vm0_vf2_newvmsession.send_expect( - "ifconfig %s down" % self.vm0_dut.ports_info[2]["intf"], "#", 10 + "ifconfig %s down" % self.vm0_sut.ports_info[2]["intf"], "#", 10 ) vm0_vf3_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[3]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[3]["intf"], self.src_logo), "tcpdump", 10, ) vm1_newvmsession.send_expect( "tcpdump -i -p %s -e ether src %s" - % (self.vm0_dut.ports_info[1]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[1]["intf"], self.src_logo), "tcpdump", 10, ) self.send_packets() - out = self.vm0_dut.get_session_output() + out = self.vm0_sut.get_session_output() self.verify( self.src_logo in out, "link down kernel vf2 impact VM0 PF receive package" ) @@ -1057,7 +1057,7 @@ class TestVfKernel(TestCase): "link down kernel vf2 impact vm0 vf3 receive package", ) - out = self.vm1_dut.get_session_output() + out = self.vm1_sut.get_session_output() self.verify( self.src_logo in out, "link down kernel vf2 impact VM1 PF receive 
package" ) @@ -1070,7 +1070,7 @@ class TestVfKernel(TestCase): ) vm0_vf2_newvmsession.send_expect( - "ifconfig %s up" % self.vm0_dut.ports_info[2]["intf"], "#" + "ifconfig %s up" % self.vm0_sut.ports_info[2]["intf"], "#" ) # Quit VF4 DPDK testpmd and expect no impact on other VFs @@ -1078,26 +1078,26 @@ class TestVfKernel(TestCase): vm0_vf2_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[2]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[2]["intf"], self.src_logo), "tcpdump", 10, ) vm0_vf3_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[3]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[3]["intf"], self.src_logo), "tcpdump", 10, ) vm1_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[1]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[1]["intf"], self.src_logo), "tcpdump", 10, ) self.send_packets() - out = self.vm0_dut.get_session_output() + out = self.vm0_sut.get_session_output() self.verify( self.src_logo in out, "quit vf4 DPDK testpmd impact VM0 PF receive package" ) @@ -1132,24 +1132,24 @@ class TestVfKernel(TestCase): # Unload VF5 kernel driver and expect no impact on other VFs vm1_newvmsession.send_expect( "./usertools/dpdk-devbind.py -b pci-stub %s" - % (self.vm1_dut.ports_info[1]["pci"]), + % (self.vm1_sut.ports_info[1]["pci"]), "#", ) vm0_vf2_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[2]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[2]["intf"], self.src_logo), "tcpdump", ) vm0_vf3_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[3]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[3]["intf"], self.src_logo), "tcpdump", ) self.send_packets() - out = self.vm0_dut.get_session_output() + out = self.vm0_sut.get_session_output() self.verify( self.src_logo in out, "unload vf5 kernel driver impact VM0 PF receive package", @@ 
-1169,30 +1169,30 @@ class TestVfKernel(TestCase): "unload vf5 kernel driver impact vm0 vf3 receive package", ) - out = self.vm1_dut.get_session_output(timeout=20) + out = self.vm1_sut.get_session_output(timeout=20) self.verify( self.src_logo in out, "unload vf5 kernel driver impact VM1 PF receive package", ) # Reboot VM1 and expect no impact on VFs of VM0 - self.vm1_dut.send_expect("quit", "#") + self.vm1_sut.send_expect("quit", "#") self.reboot_vm1() vm0_vf2_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[2]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[2]["intf"], self.src_logo), "tcpdump", ) vm0_vf3_newvmsession.send_expect( "tcpdump -i %s -p -e ether src %s" - % (self.vm0_dut.ports_info[3]["intf"], self.src_logo), + % (self.vm0_sut.ports_info[3]["intf"], self.src_logo), "tcpdump", ) self.send_packets() - out = self.vm0_dut.get_session_output() + out = self.vm0_sut.get_session_output() self.verify(self.src_logo in out, "reboot vm1 impact VM0 PF receive package") vm0_vf2_out = vm0_vf2_newvmsession.send_expect("^C", "#") @@ -1213,19 +1213,19 @@ class TestVfKernel(TestCase): """ for i in range(100): if self.kdriver == "i40e": - out = self.vm0_dut.send_expect("rmmod iavf", "#") + out = self.vm0_sut.send_expect("rmmod iavf", "#") self.verify("error" not in out, "stress error for rmmod iavf:%s" % out) - out = self.vm0_dut.send_expect("modprobe iavf", "#") + out = self.vm0_sut.send_expect("modprobe iavf", "#") self.verify( "error" not in out, "stress error for modprobe iavf:%s" % out ) else: - out = self.vm0_dut.send_expect("rmmod %svf" % self.kdriver, "#") + out = self.vm0_sut.send_expect("rmmod %svf" % self.kdriver, "#") self.verify( "error" not in out, "stress error for rmmod %svf:%s" % (self.kdriver, out), ) - out = self.vm0_dut.send_expect("modprobe %svf" % self.kdriver, "#") + out = self.vm0_sut.send_expect("modprobe %svf" % self.kdriver, "#") self.verify( "error" not in out, "stress error for modprobe %svf:%s" % 
(self.kdriver, out), @@ -1236,25 +1236,25 @@ class TestVfKernel(TestCase): Run after each test case. """ self.vm0_testpmd.quit() - self.vm0_dut.restore_interfaces_linux() + self.vm0_sut.restore_interfaces_linux() if getattr(self, "vm0_newvmsession", None): - self.vm0_dut.close_session(vm0_newvmsession) + self.vm0_sut.close_session(vm0_newvmsession) if getattr(self, "vm0_vf2_newvmsession", None): - self.vm0_dut.close_session(vm0_vf2_newvmsession) + self.vm0_sut.close_session(vm0_vf2_newvmsession) if getattr(self, "vm0_vf3_newvmsession", None): - self.vm0_dut.close_session(vm0_vf3_newvmsession) + self.vm0_sut.close_session(vm0_vf3_newvmsession) # Sometime test failed ,we still need clear ip. - self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf0, "#") - self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf1, "#") - self.tester.send_expect("ifconfig %s 0.0.0.0" % self.tester_intf, "#") + self.vm0_sut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf0, "#") + self.vm0_sut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf1, "#") + self.tg_node.send_expect("ifconfig %s 0.0.0.0" % self.tg_intf, "#") time.sleep(5) def tear_down_all(self): """ Run after each test suite. """ - self.dut_testpmd.quit() + self.sut_testpmd.quit() self.destroy_vm_env() - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) diff --git a/tests/TestSuite_vf_l3fwd.py b/tests/TestSuite_vf_l3fwd.py index 99120811..e86a1030 100644 --- a/tests/TestSuite_vf_l3fwd.py +++ b/tests/TestSuite_vf_l3fwd.py @@ -7,10 +7,10 @@ import string import time import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestVfL3fwd(TestCase): @@ -21,10 +21,10 @@ class TestVfL3fwd(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.requirt_ports_num = len(self.dut_ports) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.requirt_ports_num = len(self.sut_ports) global valports - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] # Verify that enough ports are available self.verify( @@ -33,8 +33,8 @@ class TestVfL3fwd(TestCase): # define vf's mac address self.vfs_mac = ["00:12:34:56:78:0%d" % (i + 1) for i in valports] # get socket and cores - self.socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores = self.dut.get_core_list("1S/6C/1T", socket=self.socket) + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores = self.sut_node.get_core_list("1S/6C/1T", socket=self.socket) self.verify(self.cores is not None, "Requested 6 cores failed") # get test parameters: frames size, queues number @@ -54,7 +54,7 @@ class TestVfL3fwd(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") # get dts output path if self.logger.log_path.startswith(os.sep): @@ -63,7 +63,7 @@ class TestVfL3fwd(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_up(self): """ @@ -77,19 +77,19 @@ class TestVfL3fwd(TestCase): """ if host_driver != "default" and host_driver != "igb_uio": self.logger.error("only support kernel driver and igb_uio!") - self.used_dut_port = [port for port in self.dut_ports] + self.used_sut_port = [port for port in self.sut_ports] self.sriov_vfs_port = [] for i in valports: if host_driver == "default": - h_driver = 
self.dut.ports_info[i]["port"].default_driver - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port[i], 1, driver=h_driver + h_driver = self.sut_node.ports_info[i]["port"].default_driver + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port[i], 1, driver=h_driver ) else: - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port[i], 1, driver=host_driver + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port[i], 1, driver=host_driver ) - sriov_vfs_port = self.dut.ports_info[self.used_dut_port[i]]["vfs_port"] + sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port[i]]["vfs_port"] self.sriov_vfs_port.append(sriov_vfs_port) # bind vf to vf driver try: @@ -100,15 +100,15 @@ class TestVfL3fwd(TestCase): # set vf mac address. if host_driver == "default": for i in valports: - pf_intf = self.dut.ports_info[i]["port"].get_interface_name() - self.dut.send_expect( + pf_intf = self.sut_node.ports_info[i]["port"].get_interface_name() + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (pf_intf, self.vfs_mac[i]), "#" ) else: - self.host_testpmd = PmdOutput(self.dut) + self.host_testpmd = PmdOutput(self.sut_node) eal_param = "--socket-mem=1024,1024 --file-prefix=pf" for i in valports: - eal_param += " -a %s" % self.dut.ports_info[i]["pci"] + eal_param += " -a %s" % self.sut_node.ports_info[i]["pci"] core_config = self.cores[: len(valports)] self.host_testpmd.start_testpmd(core_config, "", eal_param=eal_param) for i in valports: @@ -129,10 +129,10 @@ class TestVfL3fwd(TestCase): self.host_testpmd.execute_cmd("quit", "# ") self.host_testpmd = None for i in valports: - if "vfs_port" in self.dut.ports_info[self.used_dut_port[i]].keys(): - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port[i]) - port = self.dut.ports_info[self.used_dut_port[i]]["port"] - self.used_dut_port[i] = None + if "vfs_port" in self.sut_node.ports_info[self.used_sut_port[i]].keys(): + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port[i]) + port = 
self.sut_node.ports_info[self.used_sut_port[i]]["port"] + self.used_sut_port[i] = None self.setup_vf_env_flag = 0 def flows(self): @@ -169,8 +169,8 @@ class TestVfL3fwd(TestCase): pcap = os.sep.join( [self.output_path, "dst{0}_{1}.pcap".format(index, cnt)] ) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap, ",".join(flow))) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap, ",".join(flow))) + self.tg_node.scapy_execute() if index not in pcaps: pcaps[index] = [] pcaps[index].append(pcap) @@ -184,12 +184,12 @@ class TestVfL3fwd(TestCase): tgen_input = [] for rxPort in valports: if rxPort % len(valports) == 0 or len(valports) % rxPort == 2: - txIntf = self.tester.get_local_port(valports[rxPort + 1]) + txIntf = self.tg_node.get_local_port(valports[rxPort + 1]) port_id = valports[rxPort + 1] else: - txIntf = self.tester.get_local_port(valports[rxPort - 1]) + txIntf = self.tg_node.get_local_port(valports[rxPort - 1]) port_id = valports[rxPort - 1] - rxIntf = self.tester.get_local_port(valports[rxPort]) + rxIntf = self.tg_node.get_local_port(valports[rxPort]) for pcap in pcaps[port_id]: tgen_input.append((txIntf, rxIntf, pcap)) return tgen_input @@ -198,7 +198,7 @@ class TestVfL3fwd(TestCase): """ vf l3fwd performance test """ - l3fwd_session = self.dut.new_session() + l3fwd_session = self.sut_node.new_session() header_row = ["Frame", "mode", "Mpps", "%linerate"] self.l3fwd_test_results["header"] = header_row self.result_table_create(header_row) @@ -220,15 +220,15 @@ class TestVfL3fwd(TestCase): vm_config = self.set_fields() # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, vm_config, self.tester.pktgen + tgenInput, 100, vm_config, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 30} - # _, pps = 
self.tester.traffic_generator_throughput(tgenInput, rate_percent=100, delay=30) - _, pps = self.tester.pktgen.measure_throughput( + # _, pps = self.tg_node.traffic_generator_throughput(tgenInput, rate_percent=100, delay=30) + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) self.verify(pps > 0, "No traffic detected") @@ -242,7 +242,7 @@ class TestVfL3fwd(TestCase): self.result_table_add(data_row) self.l3fwd_test_results["data"].append(data_row) - self.dut.close_session(l3fwd_session) + self.sut_node.close_session(l3fwd_session) self.result_table_print() def measure_vf_performance(self, host_driver="default", vf_driver="vfio-pci"): @@ -253,7 +253,7 @@ class TestVfL3fwd(TestCase): eal_param = "" for i in valports: eal_param += " -a " + self.sriov_vfs_port[i][0].pci - port_mask = utils.create_mask(self.dut_ports) + port_mask = utils.create_mask(self.sut_ports) # for IIntel® Ethernet 700 Series: XL710, XXV710, use 2c/2q per VF port for performance test , # for IIntel® Ethernet 700 Series: X710, 82599/500 Series, use 1c/1q per VF port for performance test @@ -266,7 +266,7 @@ class TestVfL3fwd(TestCase): for j in range(self.queue): queue_config += "({0}, {1}, {2}),".format(i, j, core_list[m]) m += 1 - app_name = self.dut.apps_name["l3fwd"] + app_name = self.sut_node.apps_name["l3fwd"] cmdline = ( app_name + "-c {0} -n 4 {1} -- -p {2} --config '{3}' --parse-ptype".format( @@ -291,9 +291,9 @@ class TestVfL3fwd(TestCase): ) def test_perf_dpdk_pf_dpdk_vf_perf_host_only(self): - for idx in self.dut_ports: + for idx in self.sut_ports: self.verify( - self.dut.ports_info[idx]["port"].default_driver != "ice", + self.sut_node.ports_info[idx]["port"].default_driver != "ice", "Intel® Ethernet 800 Series do not support generate vfs from igb_uio", ) @@ -303,7 +303,7 @@ class TestVfL3fwd(TestCase): "Use igb_uio as host driver for testing instead of %s" % self.drivername ) - self.dut.setup_modules_linux(self.target, "igb_uio", "") + 
self.sut_node.setup_modules_linux(self.target, "igb_uio", "") self.measure_vf_performance(host_driver="igb_uio", vf_driver="igb_uio") def test_perf_kernel_pf_dpdk_iavf_perf_host_only(self): @@ -312,9 +312,9 @@ class TestVfL3fwd(TestCase): Intel® Ethernet 800 Series iavf testing is same as Intel® Ethernet 700 Series VF, so use dpdk_pf_dpdk_vf_perf_host_only to test Intel® Ethernet 800 Series iavf """ - for idx in self.dut_ports: + for idx in self.sut_ports: self.verify( - self.dut.ports_info[idx]["port"].default_driver == "i40e", + self.sut_node.ports_info[idx]["port"].default_driver == "i40e", "The case is only designed for Intel® Ethernet 700 Series", ) @@ -330,19 +330,19 @@ class TestVfL3fwd(TestCase): self.logger.info( "Configure RX/TX descriptor to 2048, and re-build ./examples/l3fwd" ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/define RTE_TEST_RX_DESC_DEFAULT.*$/" + "define RTE_TEST_RX_DESC_DEFAULT 2048/' ./examples/l3fwd/l3fwd.h", "#", 20, ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i -e 's/define RTE_TEST_TX_DESC_DEFAULT.*$/" + "define RTE_TEST_TX_DESC_DEFAULT 2048/' ./examples/l3fwd/l3fwd.h", "#", 20, ) - out = self.dut.build_dpdk_apps("./examples/l3fwd") + out = self.sut_node.build_dpdk_apps("./examples/l3fwd") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") @@ -361,4 +361,4 @@ class TestVfL3fwd(TestCase): self.destroy_vf_env() def tear_down_all(self): - self.dut.bind_interfaces_linux(self.drivername) + self.sut_node.bind_interfaces_linux(self.drivername) diff --git a/tests/TestSuite_vf_l3fwd_em_kernelpf.py b/tests/TestSuite_vf_l3fwd_em_kernelpf.py index b7e8550d..ef5359f5 100644 --- a/tests/TestSuite_vf_l3fwd_em_kernelpf.py +++ b/tests/TestSuite_vf_l3fwd_em_kernelpf.py @@ -28,13 +28,13 @@ class TestVfL3fwdEmKernelPf(TestCase, PerfTestBase): self.verify( self.nic in VF_L3FWD_NIC_SUPPORT, "NIC Unsupported: " + str(self.nic) ) - self.dut_ports = 
self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/6C/1T", socket=socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/6C/1T", socket=socket) self.verify(cores, "Requested 6 cores failed") # init l3fwd common base class parameters PerfTestBase.__init__(self, valports, socket, mode=SUITE_TYPE.VF) @@ -58,7 +58,7 @@ class TestVfL3fwdEmKernelPf(TestCase, PerfTestBase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_vf_throughput_ipv4_em(self): diff --git a/tests/TestSuite_vf_l3fwd_kernelpf.py b/tests/TestSuite_vf_l3fwd_kernelpf.py index dba4fc86..c9782b9d 100644 --- a/tests/TestSuite_vf_l3fwd_kernelpf.py +++ b/tests/TestSuite_vf_l3fwd_kernelpf.py @@ -28,13 +28,13 @@ class TestVfL3fwdKernelPf(TestCase, PerfTestBase): self.verify( self.nic in VF_L3FWD_NIC_SUPPORT, "NIC Unsupported: " + str(self.nic) ) - self.dut_ports = self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/6C/1T", socket=socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/6C/1T", socket=socket) self.verify(cores, "Requested 6 cores failed") # init l3fwd common base class parameters PerfTestBase.__init__(self, 
valports, socket, mode=SUITE_TYPE.VF) @@ -58,7 +58,7 @@ class TestVfL3fwdKernelPf(TestCase, PerfTestBase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_vf_rfc2544_ipv4_lpm(self): diff --git a/tests/TestSuite_vf_l3fwd_lpm_ipv4_kernelpf.py b/tests/TestSuite_vf_l3fwd_lpm_ipv4_kernelpf.py index 3aade4bc..1b031a47 100644 --- a/tests/TestSuite_vf_l3fwd_lpm_ipv4_kernelpf.py +++ b/tests/TestSuite_vf_l3fwd_lpm_ipv4_kernelpf.py @@ -28,13 +28,13 @@ class TestVfL3fwdLpmIpv4KernelPf(TestCase, PerfTestBase): self.verify( self.nic in VF_L3FWD_NIC_SUPPORT, "NIC Unsupported: " + str(self.nic) ) - self.dut_ports = self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/6C/1T", socket=socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/6C/1T", socket=socket) self.verify(cores, "Requested 6 cores failed") # init l3fwd common base class parameters PerfTestBase.__init__(self, valports, socket, mode=SUITE_TYPE.VF) @@ -58,7 +58,7 @@ class TestVfL3fwdLpmIpv4KernelPf(TestCase, PerfTestBase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_vf_throughput_ipv4_lpm(self): diff --git a/tests/TestSuite_vf_l3fwd_lpm_ipv4_rfc2544_kernelpf.py b/tests/TestSuite_vf_l3fwd_lpm_ipv4_rfc2544_kernelpf.py index 375ecbd5..75cc7088 100644 --- a/tests/TestSuite_vf_l3fwd_lpm_ipv4_rfc2544_kernelpf.py +++ b/tests/TestSuite_vf_l3fwd_lpm_ipv4_rfc2544_kernelpf.py @@ -28,13 +28,13 @@ class TestVfL3fwdLpmIpv4Rfc2544KernelPf(TestCase, PerfTestBase): self.verify( self.nic in VF_L3FWD_NIC_SUPPORT, "NIC Unsupported: " + str(self.nic) ) - self.dut_ports = self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/6C/1T", socket=socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/6C/1T", socket=socket) self.verify(cores, "Requested 6 cores failed") # init l3fwd common base class parameters PerfTestBase.__init__(self, valports, socket, mode=SUITE_TYPE.VF) @@ -58,7 +58,7 @@ class TestVfL3fwdLpmIpv4Rfc2544KernelPf(TestCase, PerfTestBase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_vf_rfc2544_ipv4_lpm(self): diff --git a/tests/TestSuite_vf_l3fwd_lpm_ipv6_kernelpf.py b/tests/TestSuite_vf_l3fwd_lpm_ipv6_kernelpf.py index 06cae6a0..b94aa883 100644 --- a/tests/TestSuite_vf_l3fwd_lpm_ipv6_kernelpf.py +++ b/tests/TestSuite_vf_l3fwd_lpm_ipv6_kernelpf.py @@ -28,13 +28,13 @@ class TestVfL3fwdLpmIpv6KernelPf(TestCase, PerfTestBase): self.verify( self.nic in VF_L3FWD_NIC_SUPPORT, "NIC Unsupported: " + str(self.nic) ) - self.dut_ports = self.dut.get_ports(self.nic) - valports = [_ for _ in self.dut_ports if self.tester.get_local_port(_) != -1] + self.sut_ports = self.sut_node.get_ports(self.nic) + valports = [_ for _ in self.sut_ports if self.tg_node.get_local_port(_) != -1] self.logger.debug(valports) self.verify_ports_number(valports) # get socket and cores - socket = self.dut.get_numa_id(self.dut_ports[0]) - cores = self.dut.get_core_list("1S/6C/1T", socket=socket) + socket = self.sut_node.get_numa_id(self.sut_ports[0]) + cores = self.sut_node.get_core_list("1S/6C/1T", socket=socket) self.verify(cores, "Requested 6 cores failed") # init l3fwd common base class parameters PerfTestBase.__init__(self, valports, socket, mode=SUITE_TYPE.VF) @@ -58,7 +58,7 @@ class TestVfL3fwdLpmIpv6KernelPf(TestCase, PerfTestBase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() self.perf_reset_cur_case() def test_perf_vf_throughput_ipv6_lpm(self): diff --git a/tests/TestSuite_vf_macfilter.py b/tests/TestSuite_vf_macfilter.py index c2007999..944826e4 100644 --- a/tests/TestSuite_vf_macfilter.py +++ b/tests/TestSuite_vf_macfilter.py @@ -19,8 +19,8 @@ class TestVfMacFilter(TestCase): vf0_setmac = "00:11:22:33:44:55" def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) > 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) > 1, "Insufficient ports") self.vm0 = None self.pf0_vf0_mac = "00:12:34:56:78:01" @@ -33,7 +33,7 @@ class TestVfMacFilter(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") def set_up(self): @@ -41,19 +41,19 @@ class TestVfMacFilter(TestCase): def setup_2pf_2vf_1vm_env(self, set_mac, driver="default"): - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] - pf_intf0 = self.dut.ports_info[0]["port"].get_interface_name() + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + pf_intf0 = self.sut_node.ports_info[0]["port"].get_interface_name() if set_mac: - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (pf_intf0, self.pf0_vf0_mac), "#" ) - self.used_dut_port_1 = self.dut_ports[1] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 1, driver=driver) - self.sriov_vfs_port_1 = self.dut.ports_info[self.used_dut_port_1]["vfs_port"] + self.used_sut_port_1 = self.sut_ports[1] + 
self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_1, 1, driver=driver) + self.sriov_vfs_port_1 = self.sut_node.ports_info[self.used_sut_port_1]["vfs_port"] try: @@ -69,7 +69,7 @@ class TestVfMacFilter(TestCase): if driver == "igb_uio": # start testpmd without the two VFs on the host - self.host_testpmd = PmdOutput(self.dut) + self.host_testpmd = PmdOutput(self.sut_node) if self.nic in [ "IXGBE_10G-82599_SFP", "IXGBE_10G-X550T", @@ -81,11 +81,11 @@ class TestVfMacFilter(TestCase): self.host_testpmd.start_testpmd("1S/2C/2T") # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "vf_macfilter") + self.vm0 = VM(self.sut_node, "vm0", "vf_macfilter") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") self.setup_2pf_2vf_1vm_env_flag = 1 @@ -100,30 +100,30 @@ class TestVfMacFilter(TestCase): self.vm0_testpmd.execute_cmd("stop") self.vm0_testpmd.execute_cmd("quit", "# ") self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() self.vm0 = None if getattr(self, "host_testpmd", None): self.host_testpmd.execute_cmd("quit", "# ") self.host_testpmd = None - if getattr(self, "used_dut_port_0", None): - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) - port = self.dut.ports_info[self.used_dut_port_0]["port"] + if getattr(self, "used_sut_port_0", None): + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) + port = self.sut_node.ports_info[self.used_sut_port_0]["port"] port.bind_driver() - self.used_dut_port_0 = None + self.used_sut_port_0 = None - if getattr(self, "used_dut_port_1", None): - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_1) - port = self.dut.ports_info[self.used_dut_port_1]["port"] + if 
getattr(self, "used_sut_port_1", None): + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_1) + port = self.sut_node.ports_info[self.used_sut_port_1]["port"] port.bind_driver() - self.used_dut_port_1 = None + self.used_sut_port_1 = None - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() self.setup_2pf_2vf_1vm_env_flag = 0 @@ -143,8 +143,8 @@ class TestVfMacFilter(TestCase): def result_verify_iplink(self, set_mac): if set_mac == False: self.host_testpmd.execute_cmd("set vf mac addr 0 0 %s" % self.pf0_vf0_mac) - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK) # Get VF's MAC pmd_vf0_mac = self.vm0_testpmd.get_port_mac(0) @@ -155,17 +155,17 @@ class TestVfMacFilter(TestCase): time.sleep(2) tgen_ports = [] - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) tgen_ports.append((tx_port, rx_port)) dst_mac = self.pf0_vf0_mac - src_mac = self.tester.get_mac(tx_port) + src_mac = self.tg_node.get_mac(tx_port) pkt_param = [("ether", {"dst": dst_mac, "src": src_mac})] print( "\nfirst send packets to the PF set MAC, expected result is RX packets=TX packets\n" ) - result1 = self.tester.check_random_pkts( + result1 = self.tg_node.check_random_pkts( tgen_ports, pktnum=100, allow_miss=False, params=pkt_param ) print( @@ -179,7 +179,7 @@ class TestVfMacFilter(TestCase): ) dst_mac = self.vf0_wrongmac pkt_param = [("ether", {"dst": dst_mac, "src": src_mac})] - result2 = self.tester.check_random_pkts( + result2 = self.tg_node.check_random_pkts( tgen_ports, pktnum=100, 
allow_miss=False, params=pkt_param ) print( @@ -240,8 +240,8 @@ class TestVfMacFilter(TestCase): self.result_verify_iplink(False) def send_packet_and_verify(self): - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK) # Get VF0 port MAC address @@ -257,17 +257,17 @@ class TestVfMacFilter(TestCase): time.sleep(2) tgen_ports = [] - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) tgen_ports.append((tx_port, rx_port)) - src_mac = self.tester.get_mac(tx_port) + src_mac = self.tg_node.get_mac(tx_port) dst_mac = pmd_vf0_mac pkt_param = [("ether", {"dst": dst_mac, "src": src_mac})] print( "\nfirst send packets to the random generated VF MAC, expected result is RX packets=TX packets\n" ) - result1 = self.tester.check_random_pkts( + result1 = self.tg_node.check_random_pkts( tgen_ports, pktnum=100, allow_miss=False, params=pkt_param ) print( @@ -281,7 +281,7 @@ class TestVfMacFilter(TestCase): ) dst_mac = self.vf0_setmac pkt_param = [("ether", {"dst": dst_mac, "src": src_mac})] - result2 = self.tester.check_random_pkts( + result2 = self.tg_node.check_random_pkts( tgen_ports, pktnum=100, allow_miss=False, params=pkt_param ) print( @@ -299,7 +299,7 @@ class TestVfMacFilter(TestCase): dst_mac = self.vf0_setmac pkt_param = [("ether", {"dst": dst_mac, "src": src_mac})] - result3 = self.tester.check_random_pkts( + result3 = self.tg_node.check_random_pkts( tgen_ports, pktnum=100, allow_miss=False, params=pkt_param ) print( @@ -313,7 +313,7 @@ class TestVfMacFilter(TestCase): ) dst_mac = self.vf0_wrongmac pkt_param = [("ether", {"dst": dst_mac, "src": src_mac})] - result4 = self.tester.check_random_pkts( + result4 
= self.tg_node.check_random_pkts( tgen_ports, pktnum=100, allow_miss=False, params=pkt_param ) print( @@ -326,14 +326,14 @@ class TestVfMacFilter(TestCase): if self.setup_2pf_2vf_1vm_env_flag == 1: self.destroy_2pf_2vf_1vm_env() - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): if getattr(self, "vm0", None): self.vm0.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) diff --git a/tests/TestSuite_vf_offload.py b/tests/TestSuite_vf_offload.py index 4dd05cac..ec949e78 100644 --- a/tests/TestSuite_vf_offload.py +++ b/tests/TestSuite_vf_offload.py @@ -7,7 +7,7 @@ import string import time import framework.utils as utils -from framework.crb import Crb +from framework.node import Node from framework.pmd_output import PmdOutput from framework.settings import HEADER_SIZE from framework.test_case import TestCase @@ -25,8 +25,8 @@ class TestVfOffload(TestCase): supported_vf_driver = ["pci-stub", "vfio-pci"] def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) > 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) > 1, "Insufficient ports") self.vm0 = None # set vf assign method and vf driver @@ -38,18 +38,18 @@ class TestVfOffload(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.setup_2pf_2vf_1vm_env_flag = 0 self.setup_2pf_2vf_1vm_env(driver="") - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.portMask = utils.create_mask([self.vm0_dut_ports[0]]) - self.vm0_testpmd = PmdOutput(self.vm_dut_0) - self.tester.send_expect( + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.portMask = 
utils.create_mask([self.vm0_sut_ports[0]]) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) + self.tg_node.send_expect( "ifconfig %s mtu %s" % ( - self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ), TSO_MTU, ), @@ -61,12 +61,12 @@ class TestVfOffload(TestCase): def setup_2pf_2vf_1vm_env(self, driver="default"): - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] - self.used_dut_port_1 = self.dut_ports[1] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 1, driver=driver) - self.sriov_vfs_port_1 = self.dut.ports_info[self.used_dut_port_1]["vfs_port"] + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] + self.used_sut_port_1 = self.sut_ports[1] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_1, 1, driver=driver) + self.sriov_vfs_port_1 = self.sut_node.ports_info[self.used_sut_port_1]["vfs_port"] try: @@ -82,15 +82,15 @@ class TestVfOffload(TestCase): if driver == "igb_uio": # start testpmd without the two VFs on the host - self.host_testpmd = PmdOutput(self.dut) + self.host_testpmd = PmdOutput(self.sut_node) self.host_testpmd.start_testpmd("1S/2C/2T") # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "vf_offload") + self.vm0 = VM(self.sut_node, "vm0", "vf_offload") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") self.setup_2pf_2vf_1vm_env_flag = 1 @@ -102,59 +102,59 @@ class 
TestVfOffload(TestCase): if getattr(self, "vm0", None): # destroy testpmd in vm0 self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() self.vm0 = None if getattr(self, "host_testpmd", None): self.host_testpmd.execute_cmd("quit", "# ") self.host_testpmd = None - if getattr(self, "used_dut_port_0", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) - port = self.dut.ports_info[self.used_dut_port_0]["port"] + if getattr(self, "used_sut_port_0", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) + port = self.sut_node.ports_info[self.used_sut_port_0]["port"] port.bind_driver() - self.used_dut_port_0 = None + self.used_sut_port_0 = None - if getattr(self, "used_dut_port_1", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_1) - port = self.dut.ports_info[self.used_dut_port_1]["port"] + if getattr(self, "used_sut_port_1", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_1) + port = self.sut_node.ports_info[self.used_sut_port_1]["port"] port.bind_driver() - self.used_dut_port_1 = None + self.used_sut_port_1 = None - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() self.setup_2pf_2vf_1vm_env_flag = 0 - def checksum_enablehw(self, port, dut): - dut.send_expect("port stop all", "testpmd>") - dut.send_expect("csum set ip hw %d" % port, "testpmd>") - dut.send_expect("csum set udp hw %d" % port, "testpmd>") - dut.send_expect("csum set tcp hw %d" % port, "testpmd>") - dut.send_expect("csum set sctp hw %d" % port, "testpmd>") - dut.send_expect("port start all", "testpmd>") - - def checksum_enablesw(self, port, dut): - dut.send_expect("port stop all", "testpmd>") - dut.send_expect("csum set ip sw %d" % port, "testpmd>") - dut.send_expect("csum set 
udp sw %d" % port, "testpmd>") - dut.send_expect("csum set tcp sw %d" % port, "testpmd>") - dut.send_expect("csum set sctp sw %d" % port, "testpmd>") - dut.send_expect("port start all", "testpmd>") + def checksum_enablehw(self, port, sut): + sut.send_expect("port stop all", "testpmd>") + sut.send_expect("csum set ip hw %d" % port, "testpmd>") + sut.send_expect("csum set udp hw %d" % port, "testpmd>") + sut.send_expect("csum set tcp hw %d" % port, "testpmd>") + sut.send_expect("csum set sctp hw %d" % port, "testpmd>") + sut.send_expect("port start all", "testpmd>") + + def checksum_enablesw(self, port, sut): + sut.send_expect("port stop all", "testpmd>") + sut.send_expect("csum set ip sw %d" % port, "testpmd>") + sut.send_expect("csum set udp sw %d" % port, "testpmd>") + sut.send_expect("csum set tcp sw %d" % port, "testpmd>") + sut.send_expect("csum set sctp sw %d" % port, "testpmd>") + sut.send_expect("port start all", "testpmd>") def checksum_validate(self, packets_sent, packets_expected): """ Validate the checksum. 
""" - tx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + tx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - rx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + rx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) sniff_src = self.vm0_testpmd.get_port_mac(0) @@ -163,39 +163,39 @@ class TestVfOffload(TestCase): chksum = dict() result = dict() - self.tester.send_expect("scapy", ">>> ") + self.tg_node.send_expect("scapy", ">>> ") for packet_type in list(packets_expected.keys()): - self.tester.send_expect("p = %s" % packets_expected[packet_type], ">>>") - out = self.tester.send_expect("p.show2()", ">>>") + self.tg_node.send_expect("p = %s" % packets_expected[packet_type], ">>>") + out = self.tg_node.send_expect("p.show2()", ">>>") chksums = checksum_pattern.findall(out) chksum[packet_type] = chksums print(packet_type, ": ", chksums) - self.tester.send_expect("exit()", "#") + self.tg_node.send_expect("exit()", "#") - self.tester.scapy_background() - self.tester.scapy_append( + self.tg_node.scapy_background() + self.tg_node.scapy_append( 'p = sniff(filter="ether src %s", iface="%s", count=%d)' % (sniff_src, rx_interface, len(packets_sent)) ) - self.tester.scapy_append("nr_packets=len(p)") - self.tester.scapy_append( + self.tg_node.scapy_append("nr_packets=len(p)") + self.tg_node.scapy_append( 'reslist = [p[i].sprintf("%IP.chksum%;%TCP.chksum%;%UDP.chksum%;%SCTP.chksum%") for i in range(nr_packets)]' ) - self.tester.scapy_append("import string") - self.tester.scapy_append('RESULT = ",".join(reslist)') + self.tg_node.scapy_append("import string") + self.tg_node.scapy_append('RESULT = ",".join(reslist)') # Send packet. 
- self.tester.scapy_foreground() + self.tg_node.scapy_foreground() for packet_type in list(packets_sent.keys()): - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([%s], iface="%s")' % (packets_sent[packet_type], tx_interface) ) - self.tester.scapy_execute() - out = self.tester.scapy_get_result() + self.tg_node.scapy_execute() + out = self.tg_node.scapy_get_result() packets_received = out.split(",") self.verify( len(packets_sent) == len(packets_received), "Unexpected Packets Drop" @@ -292,7 +292,7 @@ class TestVfOffload(TestCase): % (mac, expIPv6), } - self.checksum_enablehw(0, self.vm_dut_0) + self.checksum_enablehw(0, self.vm_sut_0) self.vm0_testpmd.execute_cmd("start") result = self.checksum_validate(pkts, pkts_ref) @@ -349,7 +349,7 @@ class TestVfOffload(TestCase): % (mac, expIPv6), } - self.checksum_enablesw(0, self.vm_dut_0) + self.checksum_enablesw(0, self.vm_sut_0) self.vm0_testpmd.execute_cmd("start") result = self.checksum_validate(sndPkts, expPkts) @@ -365,8 +365,8 @@ class TestVfOffload(TestCase): def tcpdump_start_sniffing(self, ifaces=[]): """ - Start tcpdump in the background to sniff the tester interface where - the packets are transmitted to and from the self.dut. + Start tcpdump in the background to sniff the TG interface where + the packets are transmitted to and from the self.sut_node. All the captured packets are going to be stored in a file for a post-analysis. """ @@ -375,16 +375,16 @@ class TestVfOffload(TestCase): command = ("tcpdump -w tcpdump_{0}.pcap -i {0} 2>tcpdump_{0}.out &").format( iface ) - self.tester.send_expect("rm -f tcpdump_{0}.pcap", "#").format(iface) - self.tester.send_expect(command, "#") + self.tg_node.send_expect("rm -f tcpdump_{0}.pcap", "#").format(iface) + self.tg_node.send_expect(command, "#") def tcpdump_stop_sniff(self): """ Stop the tcpdump process running in the background. 
""" - self.tester.send_expect("killall tcpdump", "#") + self.tg_node.send_expect("killall tcpdump", "#") time.sleep(1) - self.tester.send_expect('echo "Cleaning buffer"', "#") + self.tg_node.send_expect('echo "Cleaning buffer"', "#") time.sleep(1) def tcpdump_command(self, command): @@ -392,14 +392,14 @@ class TestVfOffload(TestCase): Send a tcpdump related command and return an integer from the output. """ - result = self.tester.send_expect(command, "#") + result = self.tg_node.send_expect(command, "#") print(result) return int(result.strip()) def number_of_packets(self, iface): """ By reading the file generated by tcpdump it counts how many packets are - forwarded by the sample app and received in the self.tester. The sample app + forwarded by the sample app and received in the self.tg_node. The sample app will add a known MAC address for the test to look for. """ @@ -413,7 +413,7 @@ class TestVfOffload(TestCase): """ Execute scanner to return results """ - scanner_result = self.tester.send_expect(scanner, "#") + scanner_result = self.tg_node.send_expect(scanner, "#") fially_result = re.findall(r"length( \d+)", scanner_result) return list(fially_result) @@ -428,29 +428,29 @@ class TestVfOffload(TestCase): """ TSO IPv4 TCP, IPv6 TCP testing. 
""" - tx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + tx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - rx_interface = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[1]) + rx_interface = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[1]) ) self.loading_sizes = [128, 800, 801, 1700, 2500] - self.tester.send_expect( + self.tg_node.send_expect( "ethtool -K %s rx off tx off tso off gso off gro off lro off" % tx_interface, "# ", ) - self.tester.send_expect("ip l set %s up" % tx_interface, "# ") - self.dut.send_expect( - "ifconfig %s mtu %s" % (self.dut.ports_info[0]["intf"], TSO_MTU), "# " + self.tg_node.send_expect("ip l set %s up" % tx_interface, "# ") + self.sut_node.send_expect( + "ifconfig %s mtu %s" % (self.sut_node.ports_info[0]["intf"], TSO_MTU), "# " ) - self.dut.send_expect( - "ifconfig %s mtu %s" % (self.dut.ports_info[1]["intf"], TSO_MTU), "# " + self.sut_node.send_expect( + "ifconfig %s mtu %s" % (self.sut_node.ports_info[1]["intf"], TSO_MTU), "# " ) - self.portMask = utils.create_mask([self.vm0_dut_ports[0]]) + self.portMask = utils.create_mask([self.vm0_sut_ports[0]]) self.vm0_testpmd.start_testpmd( VM_CORES_MASK, "--portmask=0x3 " + "--enable-rx-cksum " + "--max-pkt-len=%s" % TSO_MTU, @@ -461,61 +461,61 @@ class TestVfOffload(TestCase): self.vm0_testpmd.execute_cmd("set verbose 1", "testpmd> ", 120) self.vm0_testpmd.execute_cmd("port stop all", "testpmd> ", 120) self.vm0_testpmd.execute_cmd( - "csum set ip hw %d" % self.dut_ports[0], "testpmd> ", 120 + "csum set ip hw %d" % self.sut_ports[0], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum set udp hw %d" % self.dut_ports[0], "testpmd> ", 120 + "csum set udp hw %d" % self.sut_ports[0], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum set tcp hw %d" % self.dut_ports[0], "testpmd> ", 120 + "csum set tcp hw %d" % self.sut_ports[0], "testpmd> ", 120 ) 
self.vm0_testpmd.execute_cmd( - "csum set sctp hw %d" % self.dut_ports[0], "testpmd> ", 120 + "csum set sctp hw %d" % self.sut_ports[0], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum set outer-ip hw %d" % self.dut_ports[0], "testpmd> ", 120 + "csum set outer-ip hw %d" % self.sut_ports[0], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum parse-tunnel on %d" % self.dut_ports[0], "testpmd> ", 120 + "csum parse-tunnel on %d" % self.sut_ports[0], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum set ip hw %d" % self.dut_ports[1], "testpmd> ", 120 + "csum set ip hw %d" % self.sut_ports[1], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum set udp hw %d" % self.dut_ports[1], "testpmd> ", 120 + "csum set udp hw %d" % self.sut_ports[1], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum set tcp hw %d" % self.dut_ports[1], "testpmd> ", 120 + "csum set tcp hw %d" % self.sut_ports[1], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum set sctp hw %d" % self.dut_ports[1], "testpmd> ", 120 + "csum set sctp hw %d" % self.sut_ports[1], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum set outer-ip hw %d" % self.dut_ports[1], "testpmd> ", 120 + "csum set outer-ip hw %d" % self.sut_ports[1], "testpmd> ", 120 ) self.vm0_testpmd.execute_cmd( - "csum parse-tunnel on %d" % self.dut_ports[1], "testpmd> ", 120 + "csum parse-tunnel on %d" % self.sut_ports[1], "testpmd> ", 120 ) - self.vm0_testpmd.execute_cmd("tso set 800 %d" % self.vm0_dut_ports[1]) + self.vm0_testpmd.execute_cmd("tso set 800 %d" % self.vm0_sut_ports[1]) self.vm0_testpmd.execute_cmd("set fwd csum") self.vm0_testpmd.execute_cmd("port start all", "testpmd> ", 120) self.vm0_testpmd.execute_cmd("set promisc all off", "testpmd> ", 120) self.vm0_testpmd.execute_cmd("start") - self.tester.scapy_foreground() + self.tg_node.scapy_foreground() time.sleep(5) for loading_size in self.loading_sizes: # IPv4 tcp test out = self.vm0_testpmd.execute_cmd("clear port info all", "testpmd> ", 
120) self.tcpdump_start_sniffing([tx_interface, rx_interface]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(dst="%s",src="52:00:00:00:00:00")/IP(src="192.168.1.1",dst="192.168.1.2")/TCP(sport=1021,dport=1021)/("X"*%s)], iface="%s")' % (mac, loading_size, tx_interface) ) - out = self.tester.scapy_execute() + out = self.tg_node.scapy_execute() out = self.vm0_testpmd.execute_cmd("show port stats all") print(out) self.tcpdump_stop_sniff() @@ -545,11 +545,11 @@ class TestVfOffload(TestCase): # IPv6 tcp test out = self.vm0_testpmd.execute_cmd("clear port info all", "testpmd> ", 120) self.tcpdump_start_sniffing([tx_interface, rx_interface]) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([Ether(dst="%s", src="52:00:00:00:00:00")/IPv6(src="FE80:0:0:0:200:1FF:FE00:200", dst="3555:5555:6666:6666:7777:7777:8888:8888")/TCP(sport=1021,dport=1021)/("X"*%s)], iface="%s")' % (mac, loading_size, tx_interface) ) - out = self.tester.scapy_execute() + out = self.tg_node.scapy_execute() out = self.vm0_testpmd.execute_cmd("show port stats all") print(out) self.tcpdump_stop_sniff() @@ -577,19 +577,19 @@ class TestVfOffload(TestCase): def tear_down(self): self.vm0_testpmd.execute_cmd("quit", "# ") - self.dut.send_expect( - "ifconfig %s mtu %s" % (self.dut.ports_info[0]["intf"], DEFAULT_MTU), "# " + self.sut_node.send_expect( + "ifconfig %s mtu %s" % (self.sut_node.ports_info[0]["intf"], DEFAULT_MTU), "# " ) def tear_down_all(self): print("tear_down_all") if self.setup_2pf_2vf_1vm_env_flag == 1: self.destroy_2pf_2vf_1vm_env() - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu %s" % ( - self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ), DEFAULT_MTU, ), diff --git a/tests/TestSuite_vf_packet_rxtx.py b/tests/TestSuite_vf_packet_rxtx.py index 5d57c462..fff3a8f8 100644 --- a/tests/TestSuite_vf_packet_rxtx.py +++ 
b/tests/TestSuite_vf_packet_rxtx.py @@ -5,8 +5,8 @@ import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.virt_common import VM @@ -19,8 +19,8 @@ class TestVfPacketRxtx(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) > 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) > 1, "Insufficient ports") self.vm0 = None self.vm1 = None @@ -33,7 +33,7 @@ class TestVfPacketRxtx(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") def set_up(self): @@ -42,13 +42,13 @@ class TestVfPacketRxtx(TestCase): def setup_2pf_2vf_1vm_env(self, driver="default"): - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] - self.used_dut_port_1 = self.dut_ports[1] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 1, driver=driver) - self.sriov_vfs_port_1 = self.dut.ports_info[self.used_dut_port_1]["vfs_port"] + self.used_sut_port_1 = self.sut_ports[1] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_1, 1, driver=driver) + self.sriov_vfs_port_1 = self.sut_node.ports_info[self.used_sut_port_1]["vfs_port"] try: @@ -64,7 +64,7 @@ class TestVfPacketRxtx(TestCase): if driver == "igb_uio": # start testpmd without the two VFs on the host - self.host_testpmd = PmdOutput(self.dut) + self.host_testpmd = 
PmdOutput(self.sut_node) if self.nic in [ "IXGBE_10G-82599_SFP", "IXGBE_10G-X550T", @@ -76,11 +76,11 @@ class TestVfPacketRxtx(TestCase): self.host_testpmd.start_testpmd("1S/5C/1T") # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "vf_packet_rxtx") + self.vm0 = VM(self.sut_node, "vm0", "vf_packet_rxtx") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") self.setup_2pf_2vf_1vm_env_flag = 1 @@ -95,7 +95,7 @@ class TestVfPacketRxtx(TestCase): self.vm0_testpmd.execute_cmd("stop") self.vm0_testpmd.execute_cmd("quit", "# ") self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() self.vm0 = None @@ -104,22 +104,22 @@ class TestVfPacketRxtx(TestCase): self.host_testpmd.execute_cmd("quit", "# ") self.host_testpmd = None - self.dut.virt_exit() + self.sut_node.virt_exit() - if getattr(self, "used_dut_port_0", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) - port = self.dut.ports_info[self.used_dut_port_0]["port"] + if getattr(self, "used_sut_port_0", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) + port = self.sut_node.ports_info[self.used_sut_port_0]["port"] port.bind_driver() - self.used_dut_port_0 = None + self.used_sut_port_0 = None - if getattr(self, "used_dut_port_1", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_1) - port = self.dut.ports_info[self.used_dut_port_1]["port"] + if getattr(self, "used_sut_port_1", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_1) + port = self.sut_node.ports_info[self.used_sut_port_1]["port"] port.bind_driver() - self.used_dut_port_1 = None + self.used_sut_port_1 = None - for port_id in self.dut_ports: - port = 
self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() self.setup_2pf_2vf_1vm_env_flag = 0 @@ -131,9 +131,9 @@ class TestVfPacketRxtx(TestCase): else: self.setup_2pf_2vf_1vm_env(driver="") - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") port_id_0 = 0 - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) out = self.vm0_testpmd.start_testpmd(VM_CORES_MASK) pmd_vf0_mac = self.vm0_testpmd.get_port_mac(port_id_0) self.vm0_testpmd.execute_cmd("set fwd mac") @@ -143,16 +143,16 @@ class TestVfPacketRxtx(TestCase): time.sleep(2) tgen_ports = [] - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) tgen_ports.append((tx_port, rx_port)) dst_mac = pmd_vf0_mac - src_mac = self.tester.get_mac(tx_port) + src_mac = self.tg_node.get_mac(tx_port) pkt_param = [("ether", {"dst": dst_mac, "src": src_mac})] - result = self.tester.check_random_pkts( + result = self.tg_node.check_random_pkts( tgen_ports, allow_miss=False, params=pkt_param ) print(self.vm0_testpmd.execute_cmd("show port stats all")) @@ -170,9 +170,9 @@ class TestVfPacketRxtx(TestCase): def setup_3vf_2vm_env(self, driver="default"): - self.used_dut_port = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 3, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.used_sut_port = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 3, driver=driver) + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] try: @@ -185,14 +185,14 @@ class TestVfPacketRxtx(TestCase): vf1_prop = {"opt_host": self.sriov_vfs_port[1].pci} vf2_prop = {"opt_host": 
self.sriov_vfs_port[2].pci} - for port_id in self.dut_ports: - if port_id == self.used_dut_port: + for port_id in self.sut_ports: + if port_id == self.used_sut_port: continue - port = self.dut.ports_info[port_id]["port"] + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() if driver == "igb_uio": - self.host_testpmd = PmdOutput(self.dut) + self.host_testpmd = PmdOutput(self.sut_node) if self.nic in [ "IXGBE_10G-82599_SFP", "IXGBE_10G-X550T", @@ -204,17 +204,17 @@ class TestVfPacketRxtx(TestCase): self.host_testpmd.start_testpmd("1S/2C/2T") # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "vf_packet_rxtx") + self.vm0 = VM(self.sut_node, "vm0", "vf_packet_rxtx") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") # set up VM1 ENV - self.vm1 = VM(self.dut, "vm1", "vf_packet_rxtx") + self.vm1 = VM(self.sut_node, "vm1", "vf_packet_rxtx") self.vm1.set_vm_device(driver=self.vf_assign_method, **vf2_prop) - self.vm_dut_1 = self.vm1.start() - if self.vm_dut_1 is None: + self.vm_sut_1 = self.vm1.start() + if self.vm_sut_1 is None: raise Exception("Set up VM1 ENV failed!") self.setup_3vf_2vm_env_flag = 1 @@ -228,8 +228,8 @@ class TestVfPacketRxtx(TestCase): self.vm0_testpmd.execute_cmd("stop") self.vm0_testpmd.execute_cmd("quit", "# ") self.vm0_testpmd = None - self.vm0_dut_ports = None - self.vm_dut_0 = None + self.vm0_sut_ports = None + self.vm_sut_0 = None self.vm0.stop() self.vm0 = None @@ -238,25 +238,25 @@ class TestVfPacketRxtx(TestCase): self.vm1_testpmd.execute_cmd("stop") self.vm1_testpmd.execute_cmd("quit", "# ") self.vm1_testpmd = None - self.vm1_dut_ports = None - self.vm_dut_1 = None + self.vm1_sut_ports = None + self.vm_sut_1 = None self.vm1.stop() self.vm1 = None - self.dut.virt_exit() + 
self.sut_node.virt_exit() if getattr(self, "host_testpmd", None) != None: self.host_testpmd.execute_cmd("quit", "# ") self.host_testpmd = None - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] port.bind_driver() - self.used_dut_port = None + self.used_sut_port = None - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() self.setup_3vf_2vm_env_flag = 0 @@ -271,13 +271,13 @@ class TestVfPacketRxtx(TestCase): self.vf_reset() def vf_reset(self): - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") - self.vm1_dut_ports = self.vm_dut_1.get_ports("any") + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") + self.vm1_sut_ports = self.vm_sut_1.get_ports("any") port_id_0 = 0 port_id_1 = 1 - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK) self.vm0_testpmd.execute_cmd("show port info all") pmd0_vf0_mac = self.vm0_testpmd.get_port_mac(port_id_0) @@ -287,21 +287,21 @@ class TestVfPacketRxtx(TestCase): time.sleep(2) - self.vm1_testpmd = PmdOutput(self.vm_dut_1) + self.vm1_testpmd = PmdOutput(self.vm_sut_1) self.vm1_testpmd.start_testpmd(VM_CORES_MASK) self.vm1_testpmd.execute_cmd("show port info all") - tx_port = self.tester.get_local_port(self.dut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) rx_port = tx_port dst_mac = pmd0_vf0_mac self.vm0_testpmd.execute_cmd("clear port stats all") - pkt = Packet( + scapy_pkt_builder = ScapyPacketBuilder( "Ether(dst='%s', src='%s')/IP(len=46)" - % (dst_mac, self.tester.get_mac(tx_port)) + % (dst_mac, 
self.tg_node.get_mac(tx_port)) ) - session_bg = pkt.send_pkt_bg( - crb=self.tester, tx_port=self.tester.get_interface(tx_port), loop=1 + session_bg = scapy_pkt_builder.send_pkt_bg( + node=self.tg_node, tx_port=self.tg_node.get_interface(tx_port), loop=1 ) # vf port stop/start can trigger reset action @@ -311,7 +311,7 @@ class TestVfPacketRxtx(TestCase): self.vm1_testpmd.execute_cmd("port start all") time.sleep(0.1) - pkt.stop_send_pkt_bg(session_bg) + scapy_pkt_builder.stop_send_pkt_bg(session_bg) pmd0_vf0_stats = self.vm0_testpmd.get_pmd_stats(port_id_0) pmd0_vf1_stats = self.vm0_testpmd.get_pmd_stats(port_id_1) @@ -346,13 +346,13 @@ class TestVfPacketRxtx(TestCase): if getattr(self, "vm1", None): self.vm1.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) # DPDK-1754 - intf = self.dut.ports_info[port_id]["intf"] - self.dut.send_expect("ethtool -s %s autoneg on" % intf, "# ") + intf = self.sut_node.ports_info[port_id]["intf"] + self.sut_node.send_expect("ethtool -s %s autoneg on" % intf, "# ") def tear_down_all(self): pass diff --git a/tests/TestSuite_vf_port_start_stop.py b/tests/TestSuite_vf_port_start_stop.py index 78f76935..b00d3831 100644 --- a/tests/TestSuite_vf_port_start_stop.py +++ b/tests/TestSuite_vf_port_start_stop.py @@ -5,8 +5,8 @@ import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.virt_common import VM @@ -19,11 +19,11 @@ class TestVfPortStartStop(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.vm0 = None 
- self.tester_tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.tester_tintf = self.tester.get_interface(self.tester_tx_port) + self.tg_tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_tintf = self.tg_node.get_interface(self.tg_tx_port) # set vf assign method and vf driver self.vf_driver = self.get_suite_cfg()["vf_driver"] if self.vf_driver is None: @@ -33,21 +33,21 @@ class TestVfPortStartStop(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") def set_up(self): self.setup_1pf_2vf_1vm_env_flag = 0 self.send_pks_session = None - self.pkts = Packet() + self.scapy_pkt_builder = ScapyPacketBuilder() def send_and_verify(self, dst_mac, testpmd): """ - Generates packets by pktgen + Generates packets by traffic generator """ self.testpmd_reset_status(testpmd) - src_mac = self.tester.get_mac(self.tester_tx_port) + src_mac = self.tg_node.get_mac(self.tg_tx_port) if src_mac == "N/A": src_mac = "02:00:00:00:01" self.send_pkts(dst_mac, src_mac) @@ -72,8 +72,8 @@ class TestVfPortStartStop(TestCase): } for key in list(def_pkts.keys()): - self.pkts.append_pkt(def_pkts[key]) - self.send_pks_session = self.pkts.send_pkt_bg(self.tester, self.tester_tintf) + self.scapy_pkt_builder.append_pkt(def_pkts[key]) + self.send_pks_session = self.scapy_pkt_builder.send_pkt_bg(self.tg_node, self.tg_tintf) def testpmd_reset_status(self, testpmd): """ @@ -115,9 +115,9 @@ class TestVfPortStartStop(TestCase): def setup_1pf_2vf_1vm_env(self, driver="default"): - self.used_dut_port = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2, driver=driver) - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.used_sut_port = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 2, driver=driver) + self.sriov_vfs_port = 
self.sut_node.ports_info[self.used_sut_port]["vfs_port"] try: @@ -131,15 +131,15 @@ class TestVfPortStartStop(TestCase): if driver == "igb_uio": # start testpmd without the two VFs on the host - self.host_testpmd = PmdOutput(self.dut) + self.host_testpmd = PmdOutput(self.sut_node) self.host_testpmd.start_testpmd("1S/2C/2T") # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "vf_port_start_stop") + self.vm0 = VM(self.sut_node, "vm0", "vf_port_start_stop") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") self.setup_1pf_2vf_1vm_env_flag = 1 @@ -154,7 +154,7 @@ class TestVfPortStartStop(TestCase): self.vm0_testpmd.execute_cmd("stop") self.vm0_testpmd.execute_cmd("quit", "# ") self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() self.vm0 = None @@ -163,14 +163,14 @@ class TestVfPortStartStop(TestCase): self.host_testpmd.execute_cmd("quit", "# ") self.host_testpmd = None - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] port.bind_driver() - self.used_dut_port = None + self.used_sut_port = None - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() self.setup_1pf_2vf_1vm_env_flag = 0 @@ -179,21 +179,21 @@ class TestVfPortStartStop(TestCase): self.setup_1pf_2vf_1vm_env(driver="") - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") + self.vm0_sut_ports = 
self.vm_sut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK) self.vm0_testpmd.execute_cmd("set fwd mac") time.sleep(2) - dst_mac = self.vm0_testpmd.get_port_mac(self.vm0_dut_ports[0]) + dst_mac = self.vm0_testpmd.get_port_mac(self.vm0_sut_ports[0]) self.send_and_verify(dst_mac, self.vm0_testpmd) def tear_down(self): if self.send_pks_session: - self.pkts.stop_send_pkt_bg(self.send_pks_session) + self.scapy_pkt_builder.stop_send_pkt_bg(self.send_pks_session) if self.setup_1pf_2vf_1vm_env_flag == 1: self.destroy_1pf_2vf_1vm_env() @@ -202,7 +202,7 @@ class TestVfPortStartStop(TestCase): if getattr(self, "vm0", None): self.vm0.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() - for port_id in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(port_id) + for port_id in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(port_id) diff --git a/tests/TestSuite_vf_rss.py b/tests/TestSuite_vf_rss.py index 288ffb84..b2cf70f5 100644 --- a/tests/TestSuite_vf_rss.py +++ b/tests/TestSuite_vf_rss.py @@ -32,10 +32,10 @@ class TestVfRss(TestCase): """ global reta_lines reta_lines = [] - self.tester.scapy_foreground() - self.tester.scapy_append('sys.path.append("./")') - self.tester.scapy_append("from sctp import *") - self.vm_dut_0.send_expect("start", "testpmd>") + self.tg_node.scapy_foreground() + self.tg_node.scapy_append('sys.path.append("./")') + self.tg_node.scapy_append("from sctp import *") + self.vm_sut_0.send_expect("start", "testpmd>") mac = self.vm0_testpmd.get_port_mac(0) # send packet with different source and dest ip if tran_type == "ipv4-other": @@ -44,8 +44,8 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IP(src="192.168.0.%d", dst="192.168.0.%d")], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + 
self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-frag": for i in range(packet_count): @@ -53,8 +53,8 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IP(src="192.168.0.%d", dst="192.168.0.%d", frag=1, flags="MF")], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-tcp": for i in range(packet_count): @@ -62,8 +62,8 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IP(src="192.168.0.%d", dst="192.168.0.%d")/TCP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-udp": for i in range(packet_count): @@ -71,8 +71,8 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IP(src="192.168.0.%d", dst="192.168.0.%d")/UDP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv4-sctp": for i in range(packet_count): @@ -80,13 +80,13 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/SCTP(sport=1024,dport=1025,tag=1)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet = ( r'sendp([Ether(dst="%s")/IP(src="192.168.0.%d", dst="192.168.0.%d")/SCTP(sport=1025,dport=1024,tag=1)], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "l2_payload": for i in range(packet_count): @@ -94,8 +94,8 @@ 
class TestVfRss(TestCase): r'sendp([Ether(src="00:00:00:00:00:%02d",dst="%s")], iface="%s")' % (i + 1, mac, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-other": @@ -104,8 +104,8 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-frag": for i in range(packet_count): @@ -113,8 +113,8 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d", nh=44)/IPv6ExtHdrFragment()], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-tcp": for i in range(packet_count): @@ -122,8 +122,8 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")/TCP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-udp": for i in range(packet_count): @@ -131,8 +131,8 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s", src="02:00:00:00:00:00")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d")/UDP(sport=1024,dport=1024)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) elif tran_type == "ipv6-sctp": for i 
in range(packet_count): @@ -140,19 +140,19 @@ class TestVfRss(TestCase): r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d", nh=132)/SCTP(sport=1024,dport=1025,tag=1)], iface="%s")' % (mac, i + 1, i + 2, itf) ) - self.tester.scapy_append(packet) + self.tg_node.scapy_append(packet) packet = ( r'sendp([Ether(dst="%s")/IPv6(src="3ffe:2501:200:1fff::%d", dst="3ffe:2501:200:3::%d", nh=132)/SCTP(sport=1025,dport=1024,tag=1)], iface="%s")' % (mac, i + 2, i + 1, itf) ) - self.tester.scapy_append(packet) - self.tester.scapy_execute() + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() time.sleep(0.5) else: print("\ntran_type error!\n") - out = self.vm_dut_0.get_session_output() + out = self.vm_sut_0.get_session_output() print("*******************************************") print(out) if not reta_entries: @@ -272,8 +272,8 @@ class TestVfRss(TestCase): ], "NIC Unsupported: " + str(self.nic), ) - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Not enough ports available") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Not enough ports available") # set vf assign method and vf driver self.vf_driver = self.get_suite_cfg()["vf_driver"] @@ -284,7 +284,7 @@ class TestVfRss(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") self.vm0 = None self.host_testpmd = None @@ -299,9 +299,9 @@ class TestVfRss(TestCase): def setup_1pf_1vf_1vm_env(self, driver="default"): - self.used_dut_port_0 = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.used_sut_port_0 = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver=driver) + self.sriov_vfs_port_0 = 
self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] try: for port in self.sriov_vfs_port_0: @@ -312,18 +312,18 @@ class TestVfRss(TestCase): if driver == "igb_uio": # start testpmd without the two VFs on the host - self.host_testpmd = PmdOutput(self.dut) + self.host_testpmd = PmdOutput(self.sut_node) self.host_testpmd.start_testpmd("1S/2C/2T") # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "vf_rss") + self.vm0 = VM(self.sut_node, "vm0", "vf_rss") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prot) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.setup_1pf_1vf_1vm_env_flag = 1 except Exception as e: @@ -335,34 +335,34 @@ class TestVfRss(TestCase): if getattr(self, "vm0_testpmd", None): self.vm0_testpmd.execute_cmd("quit", "# ") self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() self.vm0 = None if getattr(self, "host_testpmd", None): self.host_testpmd.execute_cmd("quit", "# ") self.host_testpmd = None - if getattr(self, "used_dut_port_0", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) - port = self.dut.ports_info[self.used_dut_port_0]["port"] + if getattr(self, "used_sut_port_0", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) + port = self.sut_node.ports_info[self.used_sut_port_0]["port"] port.bind_driver() - self.used_dut_port_0 = None + self.used_sut_port_0 = None - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() self.setup_1pf_2vf_1vm_env_flag = 0 def test_vf_pmdrss_reta(self): - vm0dutPorts = self.vm_dut_0.get_ports("any") - 
localPort = self.tester.get_local_port(vm0dutPorts[0]) - itf = self.tester.get_interface(localPort) - self.vm0_ports_socket = self.vm_dut_0.get_numa_id(vm0dutPorts[0]) + vm0sutPorts = self.vm_sut_0.get_ports("any") + localPort = self.tg_node.get_local_port(vm0sutPorts[0]) + itf = self.tg_node.get_interface(localPort) + self.vm0_ports_socket = self.vm_sut_0.get_numa_id(vm0sutPorts[0]) iptypes = { "ipv4-other": "ip", "ipv4-frag": "ip", @@ -377,7 +377,7 @@ class TestVfRss(TestCase): # 'l2_payload': 'ether' } - self.vm_dut_0.kill_all() + self.vm_sut_0.kill_all() # test with different rss queues eal_param = "" @@ -390,9 +390,9 @@ class TestVfRss(TestCase): ) for iptype, rss_type in list(iptypes.items()): - self.vm_dut_0.send_expect("set verbose 8", "testpmd> ") - self.vm_dut_0.send_expect("set fwd rxonly", "testpmd> ") - self.vm_dut_0.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") + self.vm_sut_0.send_expect("set verbose 8", "testpmd> ") + self.vm_sut_0.send_expect("set fwd rxonly", "testpmd> ") + self.vm_sut_0.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") # configure the reta with specific mappings. 
if ( @@ -410,33 +410,33 @@ class TestVfRss(TestCase): continue for i in range(64): reta_entries.insert(i, random.randint(0, queue - 1)) - self.vm_dut_0.send_expect( + self.vm_sut_0.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> ", ) - self.vm_dut_0.send_expect( + self.vm_sut_0.send_expect( "port config all rss %s" % rss_type, "testpmd> " ) else: for i in range(512): reta_entries.insert(i, random.randint(0, queue - 1)) - self.vm_dut_0.send_expect( + self.vm_sut_0.send_expect( "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> ", ) - self.vm_dut_0.send_expect( + self.vm_sut_0.send_expect( "port config all rss %s" % rss_type, "testpmd> " ) self.send_packet(itf, iptype, queue) - self.vm_dut_0.send_expect("quit", "# ", 30) + self.vm_sut_0.send_expect("quit", "# ", 30) def test_vf_pmdrss(self): - vm0dutPorts = self.vm_dut_0.get_ports("any") - localPort = self.tester.get_local_port(vm0dutPorts[0]) - itf = self.tester.get_interface(localPort) - self.vm0_ports_socket = self.vm_dut_0.get_numa_id(vm0dutPorts[0]) + vm0sutPorts = self.vm_sut_0.get_ports("any") + localPort = self.tg_node.get_local_port(vm0sutPorts[0]) + itf = self.tg_node.get_interface(localPort) + self.vm0_ports_socket = self.vm_sut_0.get_numa_id(vm0sutPorts[0]) iptypes = { "ipv4-other": "ip", "ipv4-udp": "udp", @@ -449,7 +449,7 @@ class TestVfRss(TestCase): # 'l2_payload':'ether' } - self.vm_dut_0.kill_all() + self.vm_sut_0.kill_all() eal_param = "" # test with different rss queues @@ -462,8 +462,8 @@ class TestVfRss(TestCase): ) for iptype, rsstype in list(iptypes.items()): - self.vm_dut_0.send_expect("set verbose 8", "testpmd> ") - self.vm_dut_0.send_expect("set fwd rxonly", "testpmd> ") + self.vm_sut_0.send_expect("set verbose 8", "testpmd> ") + self.vm_sut_0.send_expect("set fwd rxonly", "testpmd> ") if ( self.nic in ["IXGBE_10G-X550T", "IXGBE_10G-X550EM_X_10G_T"] and rsstype == "sctp" @@ -472,16 +472,16 @@ class TestVfRss(TestCase): "IXGBE_10G-X550T and 
IXGBE_10G-X550EM_X_10G_T do not support rsstype sctp" ) continue - out = self.vm_dut_0.send_expect( + out = self.vm_sut_0.send_expect( "port config all rss %s" % rsstype, "testpmd> " ) self.verify( "Operation not supported" not in out, "Operation not supported" ) - self.vm_dut_0.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") + self.vm_sut_0.send_expect("set nbcore %d" % (queue + 1), "testpmd> ") self.send_packet(itf, iptype, queue, 128) - self.vm_dut_0.send_expect("quit", "# ", 30) + self.vm_sut_0.send_expect("quit", "# ", 30) def tear_down(self): """ @@ -493,5 +493,5 @@ class TestVfRss(TestCase): """ Run after each test suite. """ - # self.vm_dut_0.kill_all() + # self.vm_sut_0.kill_all() self.destroy_1pf_1vf_1vm_env() diff --git a/tests/TestSuite_vf_single_core_perf.py b/tests/TestSuite_vf_single_core_perf.py index 7101e4b9..a58b8b3f 100644 --- a/tests/TestSuite_vf_single_core_perf.py +++ b/tests/TestSuite_vf_single_core_perf.py @@ -12,10 +12,10 @@ from copy import deepcopy import framework.rst as rst import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import HEADER_SIZE, UPDATE_EXPECTED, load_global_setting from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestVfSingleCorePerf(TestCase): @@ -25,11 +25,11 @@ class TestVfSingleCorePerf(TestCase): PMD prerequisites. 
""" # Based on h/w type, choose how many ports to use - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "At least 1 port is required to test") - self.socket = self.dut.get_numa_id(self.dut_ports[0]) - self.vfs_mac = ["00:12:34:56:78:0%d" % (i + 1) for i in self.dut_ports] - self.pmdout = PmdOutput(self.dut) + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "At least 1 port is required to test") + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.vfs_mac = ["00:12:34:56:78:0%d" % (i + 1) for i in self.sut_ports] + self.pmdout = PmdOutput(self.sut_node) # set vf assign method and vf driver self.vf_driver = self.get_suite_cfg()["vf_driver"] @@ -43,7 +43,7 @@ class TestVfSingleCorePerf(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() # determine if to save test result as a separated file self.save_result_flag = True @@ -83,26 +83,26 @@ class TestVfSingleCorePerf(TestCase): """ require enough PF ports,using kernel or dpdk driver, create 1 VF from each PF. 
""" - self.used_dut_port = [port for port in self.dut_ports] + self.used_sut_port = [port for port in self.sut_ports] self.sriov_vfs_port = [] - for i in self.dut_ports: - host_driver = self.dut.ports_info[i]["port"].default_driver - self.dut.generate_sriov_vfs_by_port( - self.used_dut_port[i], 1, driver=host_driver + for i in self.sut_ports: + host_driver = self.sut_node.ports_info[i]["port"].default_driver + self.sut_node.generate_sriov_vfs_by_port( + self.used_sut_port[i], 1, driver=host_driver ) - sriov_vfs_port = self.dut.ports_info[self.used_dut_port[i]]["vfs_port"] + sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port[i]]["vfs_port"] self.sriov_vfs_port.append(sriov_vfs_port) # set vf mac address. - for i in self.dut_ports: - pf_intf = self.dut.ports_info[i]["port"].get_interface_name() - self.dut.send_expect( + for i in self.sut_ports: + pf_intf = self.sut_node.ports_info[i]["port"].get_interface_name() + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (pf_intf, self.vfs_mac[i]), "#" ) # bind vf to vf driver try: - for i in self.dut_ports: + for i in self.sut_ports: for port in self.sriov_vfs_port[i]: port.bind_driver(self.vf_driver) except Exception as e: @@ -113,8 +113,8 @@ class TestVfSingleCorePerf(TestCase): """ destroy the setup VFs """ - for i in self.dut_ports: - self.dut.destroy_sriov_vfs_by_port(self.dut_ports[i]) + for i in self.sut_ports: + self.sut_node.destroy_sriov_vfs_by_port(self.sut_ports[i]) def flows(self): """ @@ -137,19 +137,19 @@ class TestVfSingleCorePerf(TestCase): """ payload_size = frame_size - HEADER_SIZE["ip"] - HEADER_SIZE["eth"] pcaps = {} - for _port in self.dut_ports: + for _port in self.sut_ports: if 1 == port_num: flow = [ 'Ether(dst="%s")/%s/("X"*%d)' % (self.vfs_mac[_port], self.flows()[_port], payload_size) ] pcap = os.sep.join([self.output_path, "dst{0}.pcap".format(_port)]) - self.tester.scapy_append('wrpcap("%s", [%s])' % (pcap, ",".join(flow))) - self.tester.scapy_execute() + 
self.tg_node.scapy_append('wrpcap("%s", [%s])' % (pcap, ",".join(flow))) + self.tg_node.scapy_execute() pcaps[_port] = [] pcaps[_port].append(pcap) else: - index = self.dut_ports[_port] + index = self.sut_ports[_port] cnt = 0 for layer in self.flows()[_port * 2 : (_port + 1) * 2]: flow = [ @@ -159,10 +159,10 @@ class TestVfSingleCorePerf(TestCase): pcap = os.sep.join( [self.output_path, "dst{0}_{1}.pcap".format(index, cnt)] ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s", [%s])' % (pcap, ",".join(flow)) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() if index not in pcaps: pcaps[index] = [] pcaps[index].append(pcap) @@ -175,19 +175,19 @@ class TestVfSingleCorePerf(TestCase): """ tgen_input = [] if 1 == port_num: - txIntf = self.tester.get_local_port(self.dut_ports[0]) + txIntf = self.tg_node.get_local_port(self.sut_ports[0]) rxIntf = txIntf for pcap in pcaps[0]: tgen_input.append((txIntf, rxIntf, pcap)) else: for rxPort in range(port_num): if rxPort % port_num == 0 or rxPort**2 == port_num: - txIntf = self.tester.get_local_port(self.dut_ports[rxPort + 1]) - port_id = self.dut_ports[rxPort + 1] + txIntf = self.tg_node.get_local_port(self.sut_ports[rxPort + 1]) + port_id = self.sut_ports[rxPort + 1] else: - txIntf = self.tester.get_local_port(self.dut_ports[rxPort - 1]) - port_id = self.dut_ports[rxPort - 1] - rxIntf = self.tester.get_local_port(self.dut_ports[rxPort]) + txIntf = self.tg_node.get_local_port(self.sut_ports[rxPort - 1]) + port_id = self.sut_ports[rxPort - 1] + rxIntf = self.tg_node.get_local_port(self.sut_ports[rxPort]) for pcap in pcaps[port_id]: tgen_input.append((txIntf, rxIntf, pcap)) return tgen_input @@ -197,7 +197,7 @@ class TestVfSingleCorePerf(TestCase): Run nic single core performance """ self.setup_vf_env() - port_num = len(self.dut_ports) + port_num = len(self.sut_ports) self.perf_test(port_num) self.handle_expected() self.handle_results() @@ -222,7 +222,7 @@ class TestVfSingleCorePerf(TestCase): 
eal_para = "" for i in range(port_num): eal_para += " -a " + self.sriov_vfs_port[i][0].pci - port_mask = utils.create_mask(self.dut_ports) + port_mask = utils.create_mask(self.sut_ports) # parameters for application/testpmd param = " --portmask=%s" % (port_mask) @@ -233,7 +233,7 @@ class TestVfSingleCorePerf(TestCase): thread_num = int( fwd_config[fwd_config.find("/") + 1 : fwd_config.find("T")] ) - core_list = self.dut.get_core_list(core_config, socket=self.socket) + core_list = self.sut_node.get_core_list(core_config, socket=self.socket) self.verify( len(core_list) >= thread_num, "the Hyper-threading not open, please open it to test", @@ -279,28 +279,28 @@ class TestVfSingleCorePerf(TestCase): self.pmdout.start_testpmd( core_list, parameter, eal_para, socket=self.socket ) - self.dut.send_expect("set fwd mac", "testpmd> ", 15) - self.dut.send_expect("start", "testpmd> ", 15) + self.sut_node.send_expect("set fwd mac", "testpmd> ", 15) + self.sut_node.send_expect("start", "testpmd> ", 15) vm_config = self.set_fields() # clear streams before add new streams - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() - # run packet generator + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, vm_config, self.tester.pktgen + tgenInput, 100, vm_config, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"duration": self.test_duration} - _, packets_received = self.tester.pktgen.measure_throughput( + _, packets_received = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) self.verify(packets_received > 0, "No traffic detected") throughput = packets_received / 1000000.0 self.throughput[fwd_config][frame_size][nb_desc] = throughput - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("quit", "# ", 30) self.verify( throughput, @@ -327,7 +327,7 @@ class 
TestVfSingleCorePerf(TestCase): for fwd_config in list(self.test_parameters.keys()): ret_datas = {} for frame_size in list(self.test_parameters[fwd_config].keys()): - wirespeed = self.wirespeed(self.nic, frame_size, len(self.dut_ports)) + wirespeed = self.wirespeed(self.nic, frame_size, len(self.sut_ports)) ret_datas[frame_size] = {} for nb_desc in self.test_parameters[fwd_config][frame_size]: ret_data = {} @@ -450,4 +450,4 @@ class TestVfSingleCorePerf(TestCase): """ Run after each test suite. """ - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_vf_smoke.py b/tests/TestSuite_vf_smoke.py index b19160c6..665218e5 100644 --- a/tests/TestSuite_vf_smoke.py +++ b/tests/TestSuite_vf_smoke.py @@ -2,8 +2,8 @@ # Copyright(c) 2021 Intel Corporation # -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from .smoke_base import ( @@ -28,60 +28,60 @@ class TestVfSmoke(TestCase): """ # Based on h/w type, choose how many ports to use - self.smoke_dut_ports = self.dut.get_ports(self.nic) + self.smoke_sut_ports = self.sut_node.get_ports(self.nic) self.check_session = None # Verify that enough ports are available - self.verify(len(self.smoke_dut_ports) >= 1, "Insufficient ports") - self.pf_interface = self.dut.ports_info[self.smoke_dut_ports[0]]["intf"] - self.smoke_tester_port = self.tester.get_local_port(self.smoke_dut_ports[0]) - self.smoke_tester_nic = self.tester.get_interface(self.smoke_tester_port) - self.smoke_tester_mac = self.tester.get_mac(self.smoke_dut_ports[0]) - self.smoke_dut_mac = VF_MAC_ADDR + self.verify(len(self.smoke_sut_ports) >= 1, "Insufficient ports") + self.pf_interface = self.sut_node.ports_info[self.smoke_sut_ports[0]]["intf"] + self.smoke_tg_port = self.tg_node.get_local_port(self.smoke_sut_ports[0]) + self.smoke_tg_nic = self.tg_node.get_interface(self.smoke_tg_port) + self.smoke_tg_mac = 
self.tg_node.get_mac(self.smoke_sut_ports[0]) + self.smoke_sut_mac = VF_MAC_ADDR # Verify that enough core - self.cores = self.dut.get_core_list("1S/4C/1T") + self.cores = self.sut_node.get_core_list("1S/4C/1T") self.verify(self.cores is not None, "Insufficient cores for speed testing") # init pkt - self.pkt = Packet() - self.port = self.smoke_dut_ports[0] - self.dutobj = self.dut.ports_info[self.port]["port"] + self.scapy_pkt_builder = ScapyPacketBuilder() + self.port = self.smoke_sut_ports[0] + self.sut_node = self.sut_node.ports_info[self.port]["port"] # generate vf - self.dut.bind_interfaces_linux(self.kdriver) + self.sut_node.bind_interfaces_linux(self.kdriver) # The MTU of ixgbe driver can only be set through pf setting - self.dutobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU) - self.dut.generate_sriov_vfs_by_port(self.smoke_dut_ports[0], 1, self.kdriver) - self.vf_ports = self.dut.ports_info[self.smoke_dut_ports[0]]["vfs_port"] + self.sut_node.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU) + self.sut_node.generate_sriov_vfs_by_port(self.smoke_sut_ports[0], 1, self.kdriver) + self.vf_ports = self.sut_node.ports_info[self.smoke_sut_ports[0]]["vfs_port"] self.verify(len(self.vf_ports) != 0, "VF create failed") for port in self.vf_ports: port.bind_driver(self.drivername) self.vf0_prop = {"opt_host": self.vf_ports[0].pci} - self.dut.send_expect("ifconfig %s up" % self.pf_interface, "# ") - self.tester.send_expect("ifconfig %s up" % self.smoke_tester_nic, "# ") + self.sut_node.send_expect("ifconfig %s up" % self.pf_interface, "# ") + self.tg_node.send_expect("ifconfig %s up" % self.smoke_tg_nic, "# ") # set vf mac address - self.dut.send_expect( - "ip link set %s vf 0 mac %s" % (self.pf_interface, self.smoke_dut_mac), "# " + self.sut_node.send_expect( + "ip link set %s vf 0 mac %s" % (self.pf_interface, self.smoke_sut_mac), "# " ) # set default app parameter if self.vf0_prop is not None: self.ports = [self.vf0_prop["opt_host"]] - self.pmd_out = 
PmdOutput(self.dut) + self.pmd_out = PmdOutput(self.sut_node) self.test_func = SmokeTest(self) - self.check_session = self.dut.new_session(suite="vf_smoke_test") + self.check_session = self.sut_node.new_session(suite="vf_smoke_test") def set_up(self): """ Run before each test case. """ - # set tester mtu and testpmd parameter + # set TG mtu and testpmd parameter if self._suite_result.test_case == "test_vf_jumbo_frames": - self.tester.send_expect( - "ifconfig {} mtu {}".format(self.smoke_tester_nic, JUMBO_FRAME_MTU), + self.tg_node.send_expect( + "ifconfig {} mtu {}".format(self.smoke_tg_nic, JUMBO_FRAME_MTU), "# ", ) self.param = ( @@ -103,18 +103,18 @@ class TestVfSmoke(TestCase): self.pmd_out.start_testpmd(cores=self.cores, ports=self.ports, param=self.param) # set default param - self.dut.send_expect("set promisc all off", "testpmd> ") - self.pmd_out.wait_link_status_up(self.smoke_dut_ports[0]) + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.pmd_out.wait_link_status_up(self.smoke_sut_ports[0]) def test_vf_jumbo_frames(self): """ This case aims to test transmitting jumbo frame packet on testpmd with jumbo frame support. """ - self.dut.send_expect("set verbose 3", "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.pmd_out.wait_link_status_up(self.smoke_dut_ports[0]) + self.sut_node.send_expect("set verbose 3", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.pmd_out.wait_link_status_up(self.smoke_sut_ports[0]) result = self.test_func.check_jumbo_frames(self.kdriver) self.verify(result, "enable disable jumbo frames failed") @@ -122,10 +122,10 @@ class TestVfSmoke(TestCase): """ Check default rss function. 
""" - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.pmd_out.wait_link_status_up(self.smoke_dut_ports[0]) + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.pmd_out.wait_link_status_up(self.smoke_sut_ports[0]) result = self.test_func.check_rss() self.verify(result, "enable disable rss failed") @@ -133,40 +133,40 @@ class TestVfSmoke(TestCase): """ Check dpdk queue configure. """ - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.pmd_out.wait_link_status_up(self.smoke_dut_ports[0]) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.pmd_out.wait_link_status_up(self.smoke_sut_ports[0]) result = self.test_func.check_tx_rx_queue() self.verify(result, "check tx rx queue failed") def tear_down(self): - # set tester mtu to default value + # set TG mtu to default value self.pmd_out.execute_cmd("stop") if self._suite_result.test_case == "test_vf_jumbo_frames": - self.tester.send_expect( - "ifconfig {} mtu {}".format(self.smoke_tester_nic, DEFAULT_MTU_VALUE), + self.tg_node.send_expect( + "ifconfig {} mtu {}".format(self.smoke_tg_nic, DEFAULT_MTU_VALUE), "# ", ) # set dpdk queues to launch value if self._suite_result.test_case == "test_vf_tx_rx_queue": - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect( + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect( "port config all rxq {}".format(LAUNCH_QUEUE), "testpmd> " ) - self.dut.send_expect( + self.sut_node.send_expect( "port config all txq {}".format(LAUNCH_QUEUE), "testpmd> " ) - self.dut.send_expect("port start 
all", "testpmd> ") - self.dut.send_expect("quit", "# ") - self.dut.kill_all() + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("quit", "# ") + self.sut_node.kill_all() def tear_down_all(self): if self.check_session: - self.dut.close_session(self.check_session) + self.sut_node.close_session(self.check_session) self.check_session = None - self.dut.kill_all() + self.sut_node.kill_all() if self.vf0_prop: - self.dut.destroy_sriov_vfs_by_port(self.smoke_dut_ports[0]) - self.dut.bind_interfaces_linux(self.drivername) + self.sut_node.destroy_sriov_vfs_by_port(self.smoke_sut_ports[0]) + self.sut_node.bind_interfaces_linux(self.drivername) diff --git a/tests/TestSuite_vf_to_vf_nic_bridge.py b/tests/TestSuite_vf_to_vf_nic_bridge.py index d13f9cef..6cdc7350 100644 --- a/tests/TestSuite_vf_to_vf_nic_bridge.py +++ b/tests/TestSuite_vf_to_vf_nic_bridge.py @@ -26,8 +26,8 @@ class TestVF2VFBridge(TestCase): supported_vf_driver = ["pci-stub", "vfio-pci"] def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports") self.vm0 = None self.vm1 = None @@ -40,21 +40,21 @@ class TestVF2VFBridge(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") def set_up(self): self.set_up_vf_to_vf_env() def set_up_vf_to_vf_env(self, driver="default"): - self.pf_port_for_vfs = self.dut_ports[0] - self.dut.restore_interfaces() - self.dut.generate_sriov_vfs_by_port( + self.pf_port_for_vfs = self.sut_ports[0] + self.sut_node.restore_interfaces() + self.sut_node.generate_sriov_vfs_by_port( self.pf_port_for_vfs, VF_NUMS_ON_ONE_PF, driver=driver ) - self.sriov_vfs_ports = self.dut.ports_info[self.pf_port_for_vfs]["vfs_port"] - self.host_port_intf = 
self.dut.ports_info[self.pf_port_for_vfs]["intf"] + self.sriov_vfs_ports = self.sut_node.ports_info[self.pf_port_for_vfs]["vfs_port"] + self.host_port_intf = self.sut_node.ports_info[self.pf_port_for_vfs]["intf"] for i in range(VF_NUMS_ON_ONE_PF): - self.dut.send_expect( + self.sut_node.send_expect( "ip link set dev %s vf %d mac %s" % (self.host_port_intf, i, VF_TEMP_MAC % i), "#", @@ -70,20 +70,20 @@ class TestVF2VFBridge(TestCase): vf0_prop = {"opt_host": self.sriov_vfs_ports[0].pci} vf1_prop = {"opt_host": self.sriov_vfs_ports[1].pci} time.sleep(1) - self.vm0 = VM(self.dut, "vm0", "vf_to_vf_bridge") + self.vm0 = VM(self.sut_node, "vm0", "vf_to_vf_bridge") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) try: - self.vm0_dut = self.vm0.start() - if self.vm0_dut is None: + self.vm0_sut = self.vm0.start() + if self.vm0_sut is None: raise Exception("Set up VM0 failed") except Exception as e: print(utils.RED(str(e))) - self.vm1 = VM(self.dut, "vm1", "vf_to_vf_bridge") + self.vm1 = VM(self.sut_node, "vm1", "vf_to_vf_bridge") self.vm1.set_vm_device(driver=self.vf_assign_method, **vf1_prop) try: - self.vm1_dut = self.vm1.start() - if self.vm1_dut is None: + self.vm1_sut = self.vm1.start() + if self.vm1_sut is None: raise Exception("Set up VM1 failed") except Exception as e: print(utils.RED(str(e))) @@ -95,24 +95,24 @@ class TestVF2VFBridge(TestCase): if self.vm1 is not None: self.vm1.stop() self.vm1 = None - self.dut.virt_exit() + self.sut_node.virt_exit() if self.pf_port_for_vfs is not None: - self.dut.destroy_sriov_vfs_by_port(self.pf_port_for_vfs) - port = self.dut.ports_info[self.pf_port_for_vfs]["port"] + self.sut_node.destroy_sriov_vfs_by_port(self.pf_port_for_vfs) + port = self.sut_node.ports_info[self.pf_port_for_vfs]["port"] port.bind_driver() self.pf_port_for_vfs = 0 def test_2vf_d2d_testpmd_stream(self): - self.vm0_ports = self.vm0_dut.get_ports("any") - self.vm0_pmd = PmdOutput(self.vm0_dut) + self.vm0_ports = self.vm0_sut.get_ports("any") + 
self.vm0_pmd = PmdOutput(self.vm0_sut) self.vm0_pmd.start_testpmd("all") self.vm0_pmd.execute_cmd("set fwd rxonly") self.vm0_pmd.execute_cmd("set promisc all off") self.vm0_pmd.execute_cmd("start") - vm0_mac = self.vm0_dut.ports_info[self.vm0_ports[0]]["mac"] + vm0_mac = self.vm0_sut.ports_info[self.vm0_ports[0]]["mac"] - self.vm1_pmd = PmdOutput(self.vm1_dut) + self.vm1_pmd = PmdOutput(self.vm1_sut) self.vm1_pmd.start_testpmd("all") self.vm1_pmd.execute_cmd("set fwd mac") self.vm1_pmd.execute_cmd("set promisc all off") @@ -131,22 +131,22 @@ class TestVF2VFBridge(TestCase): self.verify(recv_num is SEND_PACKET, "Rx port recv error: %d" % recv_num) def test_2vf_d2k_testpmd_stream(self): - self.vm0_dut.restore_interfaces() - self.vm0_ports = self.vm0_dut.get_ports("any") - vf0_intf = self.vm0_dut.ports_info[self.vm0_ports[0]]["intf"] + self.vm0_sut.restore_interfaces() + self.vm0_ports = self.vm0_sut.get_ports("any") + vf0_intf = self.vm0_sut.ports_info[self.vm0_ports[0]]["intf"] - self.vm1_ports = self.vm1_dut.get_ports("any") + self.vm1_ports = self.vm1_sut.get_ports("any") - vm0_mac = self.vm0_dut.ports_info[self.vm0_ports[0]]["mac"] + vm0_mac = self.vm0_sut.ports_info[self.vm0_ports[0]]["mac"] filename = "m.pcap" - self.vm0_dut.send_expect( + self.vm0_sut.send_expect( "tcpdump -i %s ether dst %s -w %s" % (vf0_intf, vm0_mac, filename), "tcpdump", 30, ) - self.vm1_pmd = PmdOutput(self.vm1_dut) + self.vm1_pmd = PmdOutput(self.vm1_sut) self.vm1_pmd.start_testpmd("all") self.vm1_pmd.execute_cmd("set fwd mac") self.vm1_pmd.execute_cmd("set promisc all off") @@ -155,13 +155,13 @@ class TestVF2VFBridge(TestCase): self.vm1_pmd.execute_cmd("start tx_first 2") time.sleep(1) - recv_tcpdump = self.vm0_dut.send_expect("^C", "#", 30) + recv_tcpdump = self.vm0_sut.send_expect("^C", "#", 30) time.sleep(5) recv_pattern = re.compile("(\d+) packet\w{0,1} captured") recv_info = recv_pattern.search(recv_tcpdump) recv_str = recv_info.group(0).split(" ")[0] recv_number = int(recv_str, 
10) - self.vm0_dut.bind_interfaces_linux(self.drivername) + self.vm0_sut.bind_interfaces_linux(self.drivername) self.vm1_pmd.execute_cmd("stop") self.vm1_pmd.execute_cmd("quit", "# ") @@ -169,39 +169,39 @@ class TestVF2VFBridge(TestCase): self.verify(recv_number is SEND_PACKET, "Rx port recv error: %d" % recv_number) def test_2vf_k2d_scapy_stream(self): - self.vm0_ports = self.vm0_dut.get_ports("any") - self.vm0_pmd = PmdOutput(self.vm0_dut) + self.vm0_ports = self.vm0_sut.get_ports("any") + self.vm0_pmd = PmdOutput(self.vm0_sut) self.vm0_pmd.start_testpmd("all") - self.vm1_ports = self.vm1_dut.get_ports("any") - self.vm1_dut.restore_interfaces() - vf1_intf = self.vm1_dut.ports_info[self.vm1_ports[0]]["intf"] + self.vm1_ports = self.vm1_sut.get_ports("any") + self.vm1_sut.restore_interfaces() + vf1_intf = self.vm1_sut.ports_info[self.vm1_ports[0]]["intf"] - dst_mac = self.vm0_dut.ports_info[self.vm0_ports[0]]["mac"] - src_mac = self.vm1_dut.ports_info[self.vm1_ports[0]]["mac"] + dst_mac = self.vm0_sut.ports_info[self.vm0_ports[0]]["mac"] + src_mac = self.vm1_sut.ports_info[self.vm1_ports[0]]["mac"] pkt_content = 'Ether(dst="%s", src="%s")/IP()/Raw(load="X"*46)' % ( dst_mac, src_mac, ) - self.vm1_dut.send_expect("scapy", ">>> ", 10) + self.vm1_sut.send_expect("scapy", ">>> ", 10) self.vm0_pmd.execute_cmd("set promisc all off") self.vm0_pmd.execute_cmd("set fwd rxonly") self.vm0_pmd.execute_cmd("set verbose 1") self.vm0_pmd.execute_cmd("start") - self.vm1_dut.send_expect( + self.vm1_sut.send_expect( 'sendp([%s], iface="%s", count=%d)' % (pkt_content, vf1_intf, SEND_PACKET), ">>> ", 30, ) - out = self.vm0_dut.get_session_output(timeout=60) + out = self.vm0_sut.get_session_output(timeout=60) rx_packets = re.findall("src=%s - dst=%s" % (src_mac, dst_mac), out) recv_num = len(rx_packets) - self.vm1_dut.send_expect("quit()", "# ", 10) - self.vm1_dut.bind_interfaces_linux(self.drivername) + self.vm1_sut.send_expect("quit()", "# ", 10) + 
self.vm1_sut.bind_interfaces_linux(self.drivername) self.vm0_pmd.execute_cmd("stop") self.vm0_pmd.execute_cmd("quit", "# ") diff --git a/tests/TestSuite_vf_vlan.py b/tests/TestSuite_vf_vlan.py index e73ad479..878e5e5c 100644 --- a/tests/TestSuite_vf_vlan.py +++ b/tests/TestSuite_vf_vlan.py @@ -6,8 +6,8 @@ import random import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import get_nic_name from framework.test_case import TestCase from framework.virt_common import VM @@ -22,8 +22,8 @@ class TestVfVlan(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) > 1, "Insufficient ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) > 1, "Insufficient ports") self.vm0 = None self.env_done = False @@ -36,17 +36,17 @@ class TestVfVlan(TestCase): self.vf_assign_method = "pci-assign" else: self.vf_assign_method = "vfio-pci" - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_node.send_expect("modprobe vfio-pci", "#") # get driver version self.driver_version = self.nic_obj.driver_version # bind to default driver - self.bind_nic_driver(self.dut_ports[:2], driver="") - self.host_intf0 = self.dut.ports_info[self.dut_ports[0]]["intf"] + self.bind_nic_driver(self.sut_ports[:2], driver="") + self.host_intf0 = self.sut_node.ports_info[self.sut_ports[0]]["intf"] # get priv-flags default stats self.flag = "vf-vlan-pruning" - self.default_stats = self.dut.get_priv_flags_state(self.host_intf0, self.flag) + self.default_stats = self.sut_node.get_priv_flags_state(self.host_intf0, self.flag) def set_up(self): self.setup_vm_env() @@ -58,38 +58,38 @@ class TestVfVlan(TestCase): if self.env_done: return - self.used_dut_port_0 = self.dut_ports[0] - self.host_intf0 = self.dut.ports_info[self.used_dut_port_0]["intf"] - tester_port = 
self.tester.get_local_port(self.used_dut_port_0) - self.tester_intf0 = self.tester.get_interface(tester_port) + self.used_sut_port_0 = self.sut_ports[0] + self.host_intf0 = self.sut_node.ports_info[self.used_sut_port_0]["intf"] + tg_port = self.tg_node.get_local_port(self.used_sut_port_0) + self.tg_intf0 = self.tg_node.get_interface(tg_port) if self.is_eth_series_nic(800) and self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.host_intf0, self.flag), "# " ) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver) - self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_0, 1, driver=driver) + self.sriov_vfs_port_0 = self.sut_node.ports_info[self.used_sut_port_0]["vfs_port"] if self.kdriver == "ice": - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 spoofchk off" % (self.host_intf0), "# " ) self.vf0_mac = "00:10:00:00:00:00" - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.host_intf0, self.vf0_mac), "# " ) - self.used_dut_port_1 = self.dut_ports[1] - self.host_intf1 = self.dut.ports_info[self.used_dut_port_1]["intf"] + self.used_sut_port_1 = self.sut_ports[1] + self.host_intf1 = self.sut_node.ports_info[self.used_sut_port_1]["intf"] if self.is_eth_series_nic(800) and self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s on" % (self.host_intf1, self.flag), "# " ) - self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 1, driver=driver) - self.sriov_vfs_port_1 = self.dut.ports_info[self.used_dut_port_1]["vfs_port"] - tester_port = self.tester.get_local_port(self.used_dut_port_1) - self.tester_intf1 = self.tester.get_interface(tester_port) + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port_1, 1, driver=driver) + self.sriov_vfs_port_1 = 
self.sut_node.ports_info[self.used_sut_port_1]["vfs_port"] + tg_port = self.tg_node.get_local_port(self.used_sut_port_1) + self.tg_intf1 = self.tg_node.get_interface(tg_port) self.vf1_mac = "00:20:00:00:00:00" - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 mac %s" % (self.host_intf1, self.vf1_mac), "# " ) @@ -106,11 +106,11 @@ class TestVfVlan(TestCase): vf1_prop = {"opt_host": self.sriov_vfs_port_1[0].pci} # set up VM0 ENV - self.vm0 = VM(self.dut, "vm0", "vf_vlan") + self.vm0 = VM(self.sut_node, "vm0", "vf_vlan") self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop) self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop) - self.vm_dut_0 = self.vm0.start() - if self.vm_dut_0 is None: + self.vm_sut_0 = self.vm0.start() + if self.vm_sut_0 is None: raise Exception("Set up VM0 ENV failed!") except Exception as e: @@ -121,26 +121,26 @@ class TestVfVlan(TestCase): def destroy_vm_env(self): if getattr(self, "vm0", None): - if getattr(self, "vm_dut_0", None): - self.vm_dut_0.kill_all() + if getattr(self, "vm_sut_0", None): + self.vm_sut_0.kill_all() self.vm0_testpmd = None - self.vm0_dut_ports = None + self.vm0_sut_ports = None # destroy vm0 self.vm0.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() self.vm0 = None - if getattr(self, "used_dut_port_0", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0) - port = self.dut.ports_info[self.used_dut_port_0]["port"] - self.used_dut_port_0 = None + if getattr(self, "used_sut_port_0", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_0) + port = self.sut_node.ports_info[self.used_sut_port_0]["port"] + self.used_sut_port_0 = None - if getattr(self, "used_dut_port_1", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_1) - port = self.dut.ports_info[self.used_dut_port_1]["port"] - self.used_dut_port_1 = None + if getattr(self, "used_sut_port_1", None) != None: + 
self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port_1) + port = self.sut_node.ports_info[self.used_sut_port_1]["port"] + self.used_sut_port_1 = None - self.bind_nic_driver(self.dut_ports[:2], driver="") + self.bind_nic_driver(self.sut_ports[:2], driver="") self.env_done = False @@ -150,59 +150,59 @@ class TestVfVlan(TestCase): """ random_vlan = random.randint(1, MAX_VLAN) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 vlan %d" % (self.host_intf0, random_vlan), "# " ) - out = self.dut.send_expect("ip link show %s" % self.host_intf0, "# ") + out = self.sut_node.send_expect("ip link show %s" % self.host_intf0, "# ") self.verify("vlan %d" % random_vlan in out, "Failed to add pvid on VF0") - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK) self.vm0_testpmd.execute_cmd("set fwd mac") self.vm0_testpmd.execute_cmd("start") - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": self.vf1_mac}) - inst = self.tester.tcpdump_sniff_packets(self.tester_intf0) - pkt.send_pkt(self.tester, tx_port=self.tester_intf1) - pkts = self.tester.load_tcpdump_sniff_packets(inst) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": self.vf1_mac}) + inst = self.tg_node.tcpdump_sniff_packets(self.tg_intf0) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf1) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst) self.verify(len(pkts), "Not receive expected packet") self.vm0_testpmd.quit() # disable pvid - self.dut.send_expect("ip link set %s vf 0 vlan 0" % (self.host_intf0), "# ") + self.sut_node.send_expect("ip link set %s vf 0 vlan 0" % (self.host_intf0), "# ") def send_and_getout(self, vlan=0, pkt_type="UDP"): if pkt_type == "UDP": - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", 
{"dst": self.vf0_mac}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": self.vf0_mac}) elif pkt_type == "VLAN_UDP": - pkt = Packet(pkt_type="VLAN_UDP") - pkt.config_layer("vlan", {"vlan": vlan}) - pkt.config_layer("ether", {"dst": self.vf0_mac}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP") + scapy_pkt_builder.config_layer("vlan", {"vlan": vlan}) + scapy_pkt_builder.config_layer("ether", {"dst": self.vf0_mac}) - pkt.send_pkt(self.tester, tx_port=self.tester_intf0) - out = self.vm_dut_0.get_session_output(timeout=2) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tg_intf0) + out = self.vm_sut_0.get_session_output(timeout=2) return out def test_add_pvid_vf(self): random_vlan = random.randint(1, MAX_VLAN) - self.dut.send_expect( + self.sut_node.send_expect( "ip link set %s vf 0 vlan %d" % (self.host_intf0, random_vlan), "# " ) - out = self.dut.send_expect("ip link show %s" % self.host_intf0, "# ") + out = self.sut_node.send_expect("ip link show %s" % self.host_intf0, "# ") self.verify("vlan %d" % random_vlan in out, "Failed to add pvid on VF0") # start testpmd in VM - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK) self.vm0_testpmd.execute_cmd("set fwd rxonly") self.vm0_testpmd.execute_cmd("set verbose 1") @@ -223,8 +223,8 @@ class TestVfVlan(TestCase): # remove vlan self.vm0_testpmd.execute_cmd("stop") self.vm0_testpmd.execute_cmd("port stop all") - self.dut.send_expect("ip link set %s vf 0 vlan 0" % self.host_intf0, "# ") - out = self.dut.send_expect("ip link show %s" % self.host_intf0, "# ") + self.sut_node.send_expect("ip link set %s vf 0 vlan 0" % self.host_intf0, "# ") + out = self.sut_node.send_expect("ip link show %s" % self.host_intf0, "# ") self.verify("vlan %d" % random_vlan not in 
out, "Failed to remove pvid on VF0") # send packet with vlan @@ -249,16 +249,16 @@ class TestVfVlan(TestCase): self.vm0_testpmd.quit() # disable pvid - self.dut.send_expect("ip link set %s vf 0 vlan 0" % (self.host_intf0), "# ") + self.sut_node.send_expect("ip link set %s vf 0 vlan 0" % (self.host_intf0), "# ") def tx_and_check(self, tx_vlan=1): - inst = self.tester.tcpdump_sniff_packets(self.tester_intf0) + inst = self.tg_node.tcpdump_sniff_packets(self.tg_intf0) self.vm0_testpmd.execute_cmd("set burst 1") self.vm0_testpmd.execute_cmd("start tx_first") self.vm0_testpmd.execute_cmd("stop") # strip sniffered vlans - pkts = self.tester.load_tcpdump_sniff_packets(inst) + pkts = self.tg_node.load_tcpdump_sniff_packets(inst) vlans = [] for i in range(len(pkts)): vlan = pkts.strip_element_vlan("vlan", p_index=i) @@ -271,9 +271,9 @@ class TestVfVlan(TestCase): random_vlan = random.randint(1, MAX_VLAN) tx_vlans = [1, random_vlan, MAX_VLAN] # start testpmd in VM - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK) self.vm0_testpmd.execute_cmd("set verbose 1") @@ -296,9 +296,9 @@ class TestVfVlan(TestCase): random_vlan = random.randint(1, MAX_VLAN - 1) rx_vlans = [1, random_vlan, MAX_VLAN] # start testpmd in VM - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) self.vm0_testpmd.start_testpmd(VM_CORES_MASK) self.vm0_testpmd.execute_cmd("set fwd rxonly") self.vm0_testpmd.execute_cmd("set verbose 1") @@ -327,7 +327,7 @@ class TestVfVlan(TestCase): "VLAN tci=%s" % vlan_hex in out, "Not received expected vlan packet!!!" 
) - pkt = Packet(pkt_type="VLAN_UDP") + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP") if rx_vlan == MAX_VLAN: continue wrong_vlan = (rx_vlan + 1) % 4096 @@ -371,9 +371,9 @@ class TestVfVlan(TestCase): random_vlan = random.randint(1, MAX_VLAN - 1) rx_vlans = [1, random_vlan, MAX_VLAN] # start testpmd in VM - self.vm0_dut_ports = self.vm_dut_0.get_ports("any") + self.vm0_sut_ports = self.vm_sut_0.get_ports("any") - self.vm0_testpmd = PmdOutput(self.vm_dut_0) + self.vm0_testpmd = PmdOutput(self.vm_sut_0) if self.kdriver == "i40e": self.vm0_testpmd.start_testpmd(VM_CORES_MASK, "") else: @@ -416,7 +416,7 @@ class TestVfVlan(TestCase): def tear_down_all(self): self.destroy_vm_env() if self.is_eth_series_nic(800) and self.default_stats: - self.dut.send_expect( + self.sut_node.send_expect( "ethtool --set-priv-flags %s %s %s" % (self.host_intf0, self.flag, self.default_stats), "# ", diff --git a/tests/TestSuite_vhost_1024_ethports.py b/tests/TestSuite_vhost_1024_ethports.py index 42cdcc38..7ab6a8f8 100644 --- a/tests/TestSuite_vhost_1024_ethports.py +++ b/tests/TestSuite_vhost_1024_ethports.py @@ -19,32 +19,32 @@ class TestVhost1024Ethports(TestCase): # DPDK limits the number of vdev to 1023 self.max_ethport = 1023 self.queue = 1 - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.mem_channels = self.dut.get_memory_channels() - cores = self.dut.get_core_list("1S/2C/1T") - self.pci_info = self.dut.ports_info[0]["pci"] + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.mem_channels = self.sut_node.get_memory_channels() + cores = self.sut_node.get_core_list("1S/2C/1T") + self.pci_info = self.sut_node.ports_info[0]["pci"] self.build_user_dpdk() - self.testpmd_path = self.dut.apps_name["test-pmd"] + self.testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.testpmd_path.split("/")[-1] def set_up(self): 
""" Run before each test case. """ - self.dut.send_expect("rm -rf ./vhost-net*", "# ") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "# ") - self.vhost_user = self.dut.new_session(suite="vhost-user") + self.sut_node.send_expect("rm -rf ./vhost-net*", "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "# ") + self.vhost_user = self.sut_node.new_session(suite="vhost-user") def build_user_dpdk(self): - self.dut.build_install_dpdk(self.target, extra_options="-Dmax_ethports=1024") + self.sut_node.build_install_dpdk(self.target, extra_options="-Dmax_ethports=1024") def restore_dpdk(self): - self.dut.build_install_dpdk(self.target, extra_options="-Dmax_ethports=32") + self.sut_node.build_install_dpdk(self.target, extra_options="-Dmax_ethports=32") @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -54,9 +54,9 @@ class TestVhost1024Ethports(TestCase): Test function of launch vhost with 1024 ethports """ if self.check_2M_env: - hugepages = int(self.dut.get_total_huge_pages()) + hugepages = int(self.sut_node.get_total_huge_pages()) if hugepages < 20480: - self.dut.send_expect( + self.sut_node.send_expect( "echo 20480 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages", expected="# ", timeout=30, @@ -68,7 +68,7 @@ class TestVhost1024Ethports(TestCase): ethport, self.queue, ) - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores="1S/2C/1T", prefix="vhost", ports=[self.pci_info] ) command_line_client = ( @@ -89,8 +89,8 @@ class TestVhost1024Ethports(TestCase): """ Run after each test case. 
""" - self.dut.send_expect("rm -rf ./vhost-net*", "# ") - self.dut.close_session(self.vhost_user) + self.sut_node.send_expect("rm -rf ./vhost-net*", "# ") + self.sut_node.close_session(self.vhost_user) def tear_down_all(self): """ diff --git a/tests/TestSuite_vhost_cbdma.py b/tests/TestSuite_vhost_cbdma.py index 0584809d..319eff26 100644 --- a/tests/TestSuite_vhost_cbdma.py +++ b/tests/TestSuite_vhost_cbdma.py @@ -8,11 +8,11 @@ import re from copy import deepcopy import framework.rst as rst -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE, UPDATE_EXPECTED, load_global_setting from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream SPLIT_RING_PATH = { "inorder_mergeable_path": "mrg_rxbuf=1,in_order=1", @@ -34,26 +34,26 @@ PACKED_RING_PATH = { class TestVhostCbdma(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports() + self.sut_ports = self.sut_node.get_ports() self.number_of_ports = 1 - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user = self.dut.new_session(suite="virtio-user") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user_pmd = PmdOutput(self.dut, self.virtio_user) + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user_pmd = PmdOutput(self.sut_node, self.virtio_user) self.virtio_mac = "00:01:02:03:04:05" self.headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] - self.pci_info = self.dut.ports_info[0]["pci"] - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores_list = self.dut.get_core_list(config="all", socket=self.ports_socket) + self.pci_info = 
self.sut_node.ports_info[0]["pci"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores_list = self.sut_node.get_core_list(config="all", socket=self.ports_socket) self.vhost_core_list = self.cores_list[0:9] self.virtio_core_list = self.cores_list[9:11] self.out_path = "/tmp/%s" % self.suite_name - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") - self.pktgen_helper = PacketGeneratorHelper() - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.testpmd_name = self.dut.apps_name["test-pmd"].split("/")[-1] + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") + self.pktgen_helper = TrafficGeneratorStream() + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.testpmd_name = self.sut_node.apps_name["test-pmd"].split("/")[-1] self.save_result_flag = True self.json_obj = {} @@ -69,9 +69,9 @@ class TestVhostCbdma(TestCase): self.gap = self.get_suite_cfg()["accepted_tolerance"] self.test_result = {} self.nb_desc = self.test_parameters.get(list(self.test_parameters.keys())[0])[0] - self.dut.send_expect("killall -I %s" % self.testpmd_name, "#", 20) - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.dut.send_expect("rm -rf /tmp/s0", "#") + self.sut_node.send_expect("killall -I %s" % self.testpmd_name, "#", 20) + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("rm -rf /tmp/s0", "#") self.mode_list = [] def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False): @@ -81,7 +81,7 @@ class TestVhostCbdma(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ 
-105,7 +105,7 @@ class TestVhostCbdma(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -140,11 +140,11 @@ class TestVhostCbdma(TestCase): return lcore_dma_param def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -183,7 +183,7 @@ class TestVhostCbdma(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -227,7 +227,7 @@ class TestVhostCbdma(TestCase): ) ) virtio_param = "--nb-cores=1 --txq=1 --rxq=1 --txd=1024 --rxd=1024" - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -333,7 +333,7 @@ class TestVhostCbdma(TestCase): ) ) virtio_param = " --nb-cores=1 --txq=8 --rxq=8 --txd=1024 --rxd=1024" - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -439,7 +439,7 @@ class TestVhostCbdma(TestCase): ) ) virtio_param = " --nb-cores=1 --txq=8 --rxq=8 --txd=1024 --rxd=1024" - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -633,7 +633,7 @@ class 
TestVhostCbdma(TestCase): ) ) virtio_param = " --nb-cores=1 --txq=1 --rxq=1 --txd=1024 --rxd=1024" - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -739,7 +739,7 @@ class TestVhostCbdma(TestCase): ) ) virtio_param = "--nb-cores=1 --txq=8 --rxq=8 --txd=1024 --rxd=1024" - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -874,7 +874,7 @@ class TestVhostCbdma(TestCase): vhost_eal_param = "--vdev 'net_vhost0,iface=/tmp/s0,queues=1,client=1'" vhost_param = " --nb-cores=1 --txq=1 --rxq=1 --txd=1024 --rxd=1024" virtio_param = " --nb-cores=1 --txq=8 --rxq=8 --txd=1024 --rxd=1024" - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -1118,7 +1118,7 @@ class TestVhostCbdma(TestCase): lcore_dma ) ) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -1226,7 +1226,7 @@ class TestVhostCbdma(TestCase): lcore_dma ) ) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -1336,7 +1336,7 @@ class TestVhostCbdma(TestCase): lcore_dma ) ) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -1533,7 +1533,7 @@ class TestVhostCbdma(TestCase): lcore_dma ) ) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in 
range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -1639,7 +1639,7 @@ class TestVhostCbdma(TestCase): lcore_dma ) ) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -1778,7 +1778,7 @@ class TestVhostCbdma(TestCase): vhost_eal_param = "--vdev 'net_vhost0,iface=/tmp/s0,queues=1,client=1'" vhost_param = " --nb-cores=1 --txq=1 --rxq=1 --txd=1024 --rxd=1024" virtio_param = " --nb-cores=1 --txq=8 --rxq=8 --txd=1024 --rxd=1024" - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for i in range(cbdma_num): allow_pci.append(self.cbdma_list[i]) self.start_vhost_testpmd( @@ -2002,29 +2002,29 @@ class TestVhostCbdma(TestCase): def send_imix_packets(self, mode): """ - Send imix packet with packet generator and verify + Send imix packet with traffic generator and verify """ frame_sizes = [64, 128, 256, 512, 1024, 1518] tgenInput = [] for frame_size in frame_sizes: payload_size = frame_size - self.headers_size - port = self.tester.get_local_port(self.dut_ports[0]) + port = self.tg_node.get_local_port(self.sut_ports[0]) fields_config = { "ip": { "src": {"action": "random"}, }, } - pkt = Packet() - pkt.assign_layers(["ether", "ipv4", "raw"]) - pkt.config_layers( + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "ipv4", "raw"]) + scapy_pkt_builder.config_layers( [ ("ether", {"dst": "%s" % self.virtio_mac}), ("ipv4", {"src": "1.1.1.1"}), ("raw", {"payload": ["01"] * int("%d" % payload_size)}), ] ) - pkt.save_pcapfile( - self.tester, + scapy_pkt_builder.save_pcapfile( + self.tg_node, "%s/multiqueuerandomip_%s.pcap" % (self.out_path, frame_size), ) tgenInput.append( @@ -2035,12 +2035,12 @@ class TestVhostCbdma(TestCase): ) ) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = 
self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, fields_config, self.tester.pktgen + tgenInput, 100, fields_config, self.tg_node.perf_tg ) trans_options = {"delay": 5, "duration": self.test_duration} - bps, pps = self.tester.pktgen.measure_throughput( + bps, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=trans_options ) Mpps = pps / 1000000.0 @@ -2215,13 +2215,13 @@ class TestVhostCbdma(TestCase): """ Run after each test case. """ - self.dut.send_expect("killall -I %s" % self.testpmd_name, "#", 20) + self.sut_node.send_expect("killall -I %s" % self.testpmd_name, "#", 20) self.bind_cbdma_device_to_kernel() def tear_down_all(self): """ Run after each test suite. """ - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user) - self.dut.kill_all() + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user) + self.sut_node.kill_all() diff --git a/tests/TestSuite_vhost_event_idx_interrupt.py b/tests/TestSuite_vhost_event_idx_interrupt.py index 4f5b4bee..0db55a87 100644 --- a/tests/TestSuite_vhost_event_idx_interrupt.py +++ b/tests/TestSuite_vhost_event_idx_interrupt.py @@ -23,14 +23,14 @@ class TestVhostEventIdxInterrupt(TestCase): """ self.vm_num = 1 self.queues = 1 - self.cores_num = len([n for n in self.dut.cores if int(n["socket"]) == 0]) + self.cores_num = len([n for n in self.sut_node.cores if int(n["socket"]) == 0]) self.prepare_l3fwd_power() - self.pci_info = self.dut.ports_info[0]["pci"] - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.app_l3fwd_power_path = self.dut.apps_name["l3fwd-power"] + self.pci_info = self.sut_node.ports_info[0]["pci"] + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.app_l3fwd_power_path = self.sut_node.apps_name["l3fwd-power"] self.l3fwdpower_name = self.app_l3fwd_power_path.split("/")[-1] - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + 
self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.cbdma_dev_infos = [] self.device_str = None @@ -40,11 +40,11 @@ class TestVhostEventIdxInterrupt(TestCase): """ # Clean the execution ENV self.verify_info = [] - self.dut.send_expect(f"killall {self.l3fwdpower_name}", "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.vhost = self.dut.new_session(suite="vhost-l3fwd") - self.vm_dut = [] + self.sut_node.send_expect(f"killall {self.l3fwdpower_name}", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.vhost = self.sut_node.new_session(suite="vhost-l3fwd") + self.vm_sut = [] self.vm = [] self.nopci = True @@ -54,10 +54,10 @@ class TestVhostEventIdxInterrupt(TestCase): self.cores_num >= self.queues * self.vm_num, "There has not enought cores to test this case %s" % self.running_case, ) - self.core_list_l3fwd = self.dut.get_core_list(self.core_config) + self.core_list_l3fwd = self.sut_node.get_core_list(self.core_config) def prepare_l3fwd_power(self): - out = self.dut.build_dpdk_apps("examples/l3fwd-power") + out = self.sut_node.build_dpdk_apps("examples/l3fwd-power") self.verify("Error" not in out, "compilation l3fwd-power error") def list_split(self, items, n): @@ -65,7 +65,7 @@ class TestVhostEventIdxInterrupt(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -131,7 +131,7 @@ class TestVhostEventIdxInterrupt(TestCase): " --log-level=9 %s -- -p %s --parse-ptype 1 --config '%s' --interrupt-only" % (vdev_info, port_info, config_info) ) - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( 
cores=self.core_list_l3fwd, no_pci=self.nopci ) command_line_client = example_para + eal_params + para @@ -150,13 +150,13 @@ class TestVhostEventIdxInterrupt(TestCase): """ relauch l3fwd-power sample for port up """ - self.dut.send_expect("killall -s INT %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.l3fwdpower_name, "#") # make sure l3fwd-power be killed - pid = self.dut.send_expect( + pid = self.sut_node.send_expect( "ps -ef |grep l3|grep -v grep |awk '{print $2}'", "#" ) if pid: - self.dut.send_expect("kill -9 %s" % pid, "#") + self.sut_node.send_expect("kill -9 %s" % pid, "#") self.lanuch_l3fwd_power(cbdma) def set_vm_cpu_number(self, vm_config): @@ -178,7 +178,7 @@ class TestVhostEventIdxInterrupt(TestCase): if list(vm_config.params[i].keys())[0] == "qemu": self.vm_qemu_version = vm_config.params[i]["qemu"][0]["path"] - out = self.dut.send_expect("%s --version" % self.vm_qemu_version, "#") + out = self.sut_node.send_expect("%s --version" % self.vm_qemu_version, "#") result = re.search("QEMU\s*emulator\s*version\s*(\d*.\d*)", out) self.verify( result is not None, @@ -198,7 +198,7 @@ class TestVhostEventIdxInterrupt(TestCase): start qemus """ for i in range(vm_num): - vm_info = VM(self.dut, "vm%d" % i, "vhost_sample_copy") + vm_info = VM(self.sut_node, "vm%d" % i, "vhost_sample_copy") vm_info.load_config() vm_params = {} vm_params["driver"] = "vhost-user" @@ -216,24 +216,24 @@ class TestVhostEventIdxInterrupt(TestCase): vm_info.set_vm_device(**vm_params) self.set_vm_cpu_number(vm_info) self.check_qemu_version(vm_info) - vm_dut = None + vm_sut = None try: - vm_dut = vm_info.start(load_config=False, set_target=False) - if vm_dut is None: + vm_sut = vm_info.start(load_config=False, set_target=False) + if vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) - vm_dut.restore_interfaces() - self.vm_dut.append(vm_dut) + vm_sut.restore_interfaces() + 
self.vm_sut.append(vm_sut) self.vm.append(vm_info) def config_virito_net_in_vm(self): """ set vitio-net with 2 quques enable """ - for i in range(len(self.vm_dut)): - vm_intf = self.vm_dut[i].ports_info[0]["intf"] - self.vm_dut[i].send_expect( + for i in range(len(self.vm_sut)): + vm_intf = self.vm_sut[i].ports_info[0]["intf"] + self.vm_sut[i].send_expect( "ethtool -L %s combined %d" % (vm_intf, self.queues), "#", 20 ) @@ -269,14 +269,14 @@ class TestVhostEventIdxInterrupt(TestCase): ping_ip = 3 for vm_index in range(self.vm_num): session_info = [] - vm_intf = self.vm_dut[vm_index].ports_info[0]["intf"] - self.vm_dut[vm_index].send_expect( + vm_intf = self.vm_sut[vm_index].ports_info[0]["intf"] + self.vm_sut[vm_index].send_expect( "ifconfig %s 1.1.1.%d" % (vm_intf, ping_ip), "#" ) ping_ip = ping_ip + 1 - self.vm_dut[vm_index].send_expect("ifconfig %s up" % vm_intf, "#") + self.vm_sut[vm_index].send_expect("ifconfig %s up" % vm_intf, "#") for queue in range(self.queues): - session = self.vm_dut[vm_index].new_session( + session = self.vm_sut[vm_index].new_session( suite="ping_info_%d" % queue ) session.send_expect( @@ -289,14 +289,14 @@ class TestVhostEventIdxInterrupt(TestCase): # close all sessions of ping in vm for sess_index in range(len(session_info)): session_info[sess_index].send_expect("^c", "#") - self.vm_dut[vm_index].close_session(session_info[sess_index]) + self.vm_sut[vm_index].close_session(session_info[sess_index]) def get_cbdma_ports_info_and_bind_to_dpdk(self): """ get all cbdma ports """ self.cbdma_dev_infos = [] - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -315,7 +315,7 @@ class TestVhostEventIdxInterrupt(TestCase): self.cbdma_dev_infos = [self.cbdma_dev_infos[0], self.cbdma_dev_infos[-1]] self.used_cbdma = self.cbdma_dev_infos[0 : self.queues * self.vm_num] self.device_str = " ".join(self.used_cbdma) - self.dut.send_expect( + 
self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -324,11 +324,11 @@ class TestVhostEventIdxInterrupt(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -341,7 +341,7 @@ class TestVhostEventIdxInterrupt(TestCase): """ for i in range(len(self.vm)): self.vm[i].stop() - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#", timeout=2) + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#", timeout=2) def test_wake_up_split_ring_vhost_user_core_with_event_idx_interrupt(self): """ @@ -438,7 +438,7 @@ class TestVhostEventIdxInterrupt(TestCase): Test Case 7: wake up split ring vhost-user cores with event idx interrupt mode and cbdma enabled 16 queues test """ self.vm_num = 1 - self.bind_nic_driver(self.dut_ports) + self.bind_nic_driver(self.sut_ports) self.queues = 16 self.get_core_mask() self.nopci = False @@ -459,7 +459,7 @@ class TestVhostEventIdxInterrupt(TestCase): Test Case 8: wake up split ring vhost-user cores by multi virtio-net in VMs with event idx interrupt mode and cbdma enabled test """ self.vm_num = 2 - self.bind_nic_driver(self.dut_ports) + self.bind_nic_driver(self.sut_ports) self.queues = 1 self.get_core_mask() self.nopci = False @@ -480,7 +480,7 @@ class TestVhostEventIdxInterrupt(TestCase): Test Case 9: wake up packed ring vhost-user cores with event idx interrupt mode and cbdma enabled 16 queues test """ self.vm_num = 1 - self.bind_nic_driver(self.dut_ports) + self.bind_nic_driver(self.sut_ports) self.queues = 16 self.get_core_mask() self.nopci = False @@ -499,7 +499,7 @@ 
class TestVhostEventIdxInterrupt(TestCase): Test Case 10: wake up packed ring vhost-user cores by multi virtio-net in VMs with event idx interrupt mode and cbdma enabled test """ self.vm_num = 2 - self.bind_nic_driver(self.dut_ports) + self.bind_nic_driver(self.sut_ports) self.queues = 1 self.get_core_mask() self.nopci = False @@ -515,12 +515,12 @@ class TestVhostEventIdxInterrupt(TestCase): """ Run after each test case. """ - self.dut.close_session(self.vhost) - self.dut.send_expect(f"killall {self.l3fwdpower_name}", "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.close_session(self.vhost) + self.sut_node.send_expect(f"killall {self.l3fwdpower_name}", "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") self.bind_cbdma_device_to_kernel() if "cbdma" in self.running_case: - self.bind_nic_driver(self.dut_ports, self.drivername) + self.bind_nic_driver(self.sut_ports, self.drivername) def tear_down_all(self): """ diff --git a/tests/TestSuite_vhost_multi_queue_qemu.py b/tests/TestSuite_vhost_multi_queue_qemu.py index 22afb0a2..beee358e 100644 --- a/tests/TestSuite_vhost_multi_queue_qemu.py +++ b/tests/TestSuite_vhost_multi_queue_qemu.py @@ -11,26 +11,26 @@ import re import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM class TestVhostMultiQueueQemu(TestCase): def set_up_all(self): # Get and verify the ports - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") # Get the port's 
socket - self.pf = self.dut_ports[0] - netdev = self.dut.ports_info[self.pf]["port"] + self.pf = self.sut_ports[0] + netdev = self.sut_node.ports_info[self.pf]["port"] self.socket = netdev.get_nic_socket() - self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.socket) + self.cores = self.sut_node.get_core_list("1S/3C/1T", socket=self.socket) self.verify(len(self.cores) >= 3, "Insufficient cores for speed testing") - self.pci_info = self.dut.ports_info[0]["pci"] + self.pci_info = self.sut_node.ports_info[0]["pci"] self.frame_sizes = [64, 128, 256, 512, 1024, 1500] self.queue_number = 2 # Using file to save the vhost sample output since in jumboframe case, @@ -38,30 +38,30 @@ class TestVhostMultiQueueQemu(TestCase): self.virtio1 = "eth1" self.virtio1_mac = "52:54:00:00:00:01" - self.vm_dut = None + self.vm_sut = None self.number_of_ports = 1 self.header_row = ["FrameSize(B)", "Throughput(Mpps)", "LineRate(%)", "Cycle"] - self.memory_channel = self.dut.get_memory_channels() - self.pmd_out = PmdOutput(self.dut) + self.memory_channel = self.sut_node.get_memory_channels() + self.pmd_out = PmdOutput(self.sut_node) self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.app_path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.app_path = self.sut_node.apps_name["test-pmd"] self.app_name = self.app_path[self.app_path.rfind("/") + 1 :] def set_up(self): """ Run before each test case. 
""" - self.dut.send_expect("rm -rf ./vhost.out", "#") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.dut.send_expect("killall -s INT %s" % self.app_name, "#") + self.sut_node.send_expect("rm -rf ./vhost.out", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -s INT %s" % self.app_name, "#") self.vm_testpmd_vector = ( self.app_path + "-c %s -n 3" @@ -78,7 +78,7 @@ class TestVhostMultiQueueQemu(TestCase): r"'net_vhost0,iface=%s/vhost-net,queues=%d'" % (self.base_dir, self.queue_number) ] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.cores, ports=[self.pci_info], vdevs=vdev ) para = " -- -i --rxq=%d --txq=%d --nb-cores=2" % ( @@ -86,15 +86,15 @@ class TestVhostMultiQueueQemu(TestCase): self.queue_number, ) testcmd_start = self.app_path + eal_params + para - self.dut.send_expect(testcmd_start, "testpmd> ", 120) - self.dut.send_expect("set fwd mac", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect(testcmd_start, "testpmd> ", 120) + self.sut_node.send_expect("set fwd mac", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) def start_onevm(self): """ Start One VM with one virtio device """ - self.vm = VM(self.dut, "vm0", "vhost_sample") + self.vm = VM(self.sut_node, "vm0", "vhost_sample") vm_params = {} vm_params["driver"] = "vhost-user" vm_params["opt_path"] = self.base_dir + "/vhost-net" @@ -107,8 +107,8 @@ class TestVhostMultiQueueQemu(TestCase): self.vm.set_vm_device(**vm_params) try: - self.vm_dut = self.vm.start() - if self.vm_dut is None: + self.vm_sut = self.vm.start() + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -119,7 +119,7 @@ class TestVhostMultiQueueQemu(TestCase): """ Get the vm coremask """ - cores = self.vm_dut.get_core_list("1S/3C/1T") 
+ cores = self.vm_sut.get_core_list("1S/3C/1T") self.verify( len(cores) >= 3, "Insufficient cores for speed testing, add the cpu number in cfg file.", @@ -157,9 +157,9 @@ class TestVhostMultiQueueQemu(TestCase): payload_size = frame_size - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] tgenInput = [] - pkt1 = Packet() - pkt1.assign_layers(["ether", "ipv4", "raw"]) - pkt1.config_layers( + scapy_pkt_builder1 = ScapyPacketBuilder() + scapy_pkt_builder1.assign_layers(["ether", "ipv4", "raw"]) + scapy_pkt_builder1.config_layers( [ ("ether", {"dst": "%s" % self.virtio1_mac}), ("ipv4", {"dst": "1.1.1.1"}), @@ -167,9 +167,9 @@ class TestVhostMultiQueueQemu(TestCase): ] ) - pkt1.save_pcapfile(self.tester, "%s/multiqueue.pcap" % self.out_path) + scapy_pkt_builder1.save_pcapfile(self.tg_node, "%s/multiqueue.pcap" % self.out_path) - port = self.tester.get_local_port(self.pf) + port = self.tg_node.get_local_port(self.pf) tgenInput.append((port, port, "%s/multiqueue.pcap" % self.out_path)) fields_config = { @@ -177,12 +177,12 @@ class TestVhostMultiQueueQemu(TestCase): "dst": {"action": "random"}, }, } - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, fields_config, self.tester.pktgen + tgenInput, 100, fields_config, self.tg_node.perf_tg ) traffic_opt = {"delay": 5} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) Mpps = pps / 1000000.0 @@ -212,22 +212,22 @@ class TestVhostMultiQueueQemu(TestCase): While verify_type is "vhost queue < virtio queue", the vhost should forward all set of data While verify_type is "vhost queue > virtio queue", the vhost should forward at least one set of data """ - local_port = self.tester.get_local_port(self.dut_ports[0]) - self.tx_interface = self.tester.get_interface(local_port) + local_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tx_interface = 
self.tg_node.get_interface(local_port) for frame_size in self.frame_sizes: info = "Running test %s, and %d frame size." % ( self.running_case, frame_size, ) self.logger.info(info) - self.dut.send_expect("clear port stats all", "testpmd> ", 120) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) payload_size = ( frame_size - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - HEADER_SIZE["udp"] ) - pkts = Packet() - pkt1 = Packet() - pkt1.assign_layers(["ether", "ipv4", "udp", "raw"]) - pkt1.config_layers( + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder1 = ScapyPacketBuilder() + scapy_pkt_builder1.assign_layers(["ether", "ipv4", "udp", "raw"]) + scapy_pkt_builder1.config_layers( [ ("ether", {"dst": "%s" % self.virtio1_mac}), ("ipv4", {"dst": "1.1.1.1"}), @@ -235,9 +235,9 @@ class TestVhostMultiQueueQemu(TestCase): ("raw", {"payload": ["01"] * int("%d" % payload_size)}), ] ) - pkt2 = Packet() - pkt2.assign_layers(["ether", "ipv4", "udp", "raw"]) - pkt2.config_layers( + scapy_pkt_builder2 = ScapyPacketBuilder() + scapy_pkt_builder2.assign_layers(["ether", "ipv4", "udp", "raw"]) + scapy_pkt_builder2.config_layers( [ ("ether", {"dst": "%s" % self.virtio1_mac}), ("ipv4", {"dst": "1.1.1.20"}), @@ -245,9 +245,9 @@ class TestVhostMultiQueueQemu(TestCase): ("raw", {"payload": ["01"] * int("%d" % payload_size)}), ] ) - pkt3 = Packet() - pkt3.assign_layers(["ether", "ipv4", "udp", "raw"]) - pkt3.config_layers( + scapy_pkt_builder3 = ScapyPacketBuilder() + scapy_pkt_builder3.assign_layers(["ether", "ipv4", "udp", "raw"]) + scapy_pkt_builder3.config_layers( [ ("ether", {"dst": "%s" % self.virtio1_mac}), ("ipv4", {"dst": "1.1.1.7"}), @@ -255,9 +255,9 @@ class TestVhostMultiQueueQemu(TestCase): ("raw", {"payload": ["01"] * int("%d" % payload_size)}), ] ) - pkt4 = Packet() - pkt4.assign_layers(["ether", "ipv4", "udp", "raw"]) - pkt4.config_layers( + scapy_pkt_builder4 = ScapyPacketBuilder() + scapy_pkt_builder4.assign_layers(["ether", "ipv4", "udp", "raw"]) + 
scapy_pkt_builder4.config_layers( [ ("ether", {"dst": "%s" % self.virtio1_mac}), ("ipv4", {"dst": "1.1.1.8"}), @@ -266,12 +266,12 @@ class TestVhostMultiQueueQemu(TestCase): ] ) - pkt = [pkt1, pkt2, pkt3, pkt4] - for i in pkt: - pkts.pktgen.pkts.append(i.pktgen.pkt) - pkts.send_pkt(self.tester, tx_port=self.tx_interface, count=10) + scapy_pkt_builders = [scapy_pkt_builder1, scapy_pkt_builder2, scapy_pkt_builder3, scapy_pkt_builder4] + for i in scapy_pkt_builders: + scapy_pkt_builder.scapy_pkt_util.pkts.append(i.scapy_pkt_util.pkt) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tx_interface, count=10) - out = self.dut.send_expect("show port stats 0", "testpmd> ", 120) + out = self.sut_node.send_expect("show port stats 0", "testpmd> ", 120) print(out) rx_packet = re.search("RX-packets:\s*(\d*)", out) rx_num = int(rx_packet.group(1)) @@ -300,17 +300,17 @@ class TestVhostMultiQueueQemu(TestCase): self.start_onevm() self.get_vm_coremask() - self.vm_dut.send_expect( + self.vm_sut.send_expect( self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20 ) - self.vm_dut.send_expect("set fwd mac", "testpmd>", 20) - self.vm_dut.send_expect("start", "testpmd>") + self.vm_sut.send_expect("set fwd mac", "testpmd>", 20) + self.vm_sut.send_expect("start", "testpmd>") - self.dut.send_expect("stop", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("stop", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) time.sleep(5) self.vhost_performance() - self.vm_dut.kill_all() + self.vm_sut.kill_all() def test_dynamic_change_virtio_queue_size(self): """ @@ -325,34 +325,34 @@ class TestVhostMultiQueueQemu(TestCase): + " --rxq=1 --txq=1 --rss-ip --nb-cores=1" ) self.get_vm_coremask() - self.vm_dut.send_expect( + self.vm_sut.send_expect( self.vm_testpmd_queue_1 % self.vm_coremask, "testpmd>", 20 ) - self.vm_dut.send_expect("set fwd mac", "testpmd>", 20) - self.vm_dut.send_expect("start", "testpmd>") + 
self.vm_sut.send_expect("set fwd mac", "testpmd>", 20) + self.vm_sut.send_expect("start", "testpmd>") - self.dut.send_expect("clear port stats all", "testpmd> ", 120) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) res = self.pmd_out.wait_link_status_up("all", timeout=15) self.verify(res is True, "There has port link is down") self.send_and_verify("vhost queue > virtio queue") - self.vm_dut.send_expect("stop", "testpmd>", 20) - self.vm_dut.send_expect("port stop all", "testpmd>") - self.vm_dut.send_expect("port config all rxq 2", "testpmd>", 20) - self.vm_dut.send_expect("port config all txq 2", "testpmd>") - self.vm_dut.send_expect("port start all", "testpmd>", 20) - self.vm_dut.send_expect("start", "testpmd>") + self.vm_sut.send_expect("stop", "testpmd>", 20) + self.vm_sut.send_expect("port stop all", "testpmd>") + self.vm_sut.send_expect("port config all rxq 2", "testpmd>", 20) + self.vm_sut.send_expect("port config all txq 2", "testpmd>") + self.vm_sut.send_expect("port start all", "testpmd>", 20) + self.vm_sut.send_expect("start", "testpmd>") - self.dut.send_expect("stop", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("stop", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) res = self.pmd_out.wait_link_status_up("all", timeout=15) self.verify(res is True, "There has port link is down") - self.dut.send_expect("clear port stats all", "testpmd> ", 120) + self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) self.send_and_verify("vhost queue = virtio queue") - self.vm_dut.kill_all() - self.dut.send_expect("quit", "# ", 120) + self.vm_sut.kill_all() + self.sut_node.send_expect("quit", "# ", 120) def test_dynamic_change_vhost_queue_size(self): """ @@ -360,43 +360,43 @@ class TestVhostMultiQueueQemu(TestCase): """ self.queue_number = 2 vdev = [r"'net_vhost0,iface=%s/vhost-net,queues=2'" % self.base_dir] - eal_params = self.dut.create_eal_parameters( + 
eal_params = self.sut_node.create_eal_parameters( cores=self.cores, ports=[self.pci_info], vdevs=vdev ) para = " -- -i --rxq=1 --txq=1 --nb-cores=1" testcmd_start = self.app_path + eal_params + para - self.dut.send_expect(testcmd_start, "testpmd> ", 120) - self.dut.send_expect("set fwd mac", "testpmd> ", 120) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect(testcmd_start, "testpmd> ", 120) + self.sut_node.send_expect("set fwd mac", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) self.start_onevm() self.get_vm_coremask() - self.vm_dut.send_expect( + self.vm_sut.send_expect( self.vm_testpmd_vector % self.vm_coremask, "testpmd>", 20 ) - self.vm_dut.send_expect("set fwd mac", "testpmd>", 20) - self.vm_dut.send_expect("start", "testpmd>") - self.dut.send_expect("clear port stats all", "testpmd> ", 120) + self.vm_sut.send_expect("set fwd mac", "testpmd>", 20) + self.vm_sut.send_expect("start", "testpmd>") + self.sut_node.send_expect("clear port stats all", "testpmd> ", 120) res = self.pmd_out.wait_link_status_up("all", timeout=15) self.verify(res is True, "There has port link is down") self.send_and_verify("vhost queue < virtio queue") - self.dut.send_expect("stop", "testpmd>", 20) - self.dut.send_expect("port stop all", "testpmd>") - self.dut.send_expect("port config all rxq 2", "testpmd>", 20) - self.dut.send_expect("port config all txq 2", "testpmd>") - self.dut.send_expect("port start all", "testpmd>", 20) - self.dut.send_expect("start", "testpmd>") - self.dut.send_expect("clear port stats all", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>", 20) + self.sut_node.send_expect("port stop all", "testpmd>") + self.sut_node.send_expect("port config all rxq 2", "testpmd>", 20) + self.sut_node.send_expect("port config all txq 2", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd>", 20) + self.sut_node.send_expect("start", "testpmd>") + self.sut_node.send_expect("clear port stats all", 
"testpmd>") res = self.pmd_out.wait_link_status_up("all", timeout=15) self.verify(res is True, "There has port link is down") self.send_and_verify("vhost queue = virtio queue") - self.vm_dut.kill_all() - self.dut.send_expect("quit", "# ", 120) + self.vm_sut.kill_all() + self.sut_node.send_expect("quit", "# ", 120) def tear_down(self): """ @@ -405,7 +405,7 @@ class TestVhostMultiQueueQemu(TestCase): """ if hasattr(self, "vm"): self.vm.stop() - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) def tear_down_all(self): diff --git a/tests/TestSuite_vhost_pmd_xstats.py b/tests/TestSuite_vhost_pmd_xstats.py index 46258924..3923dbc8 100644 --- a/tests/TestSuite_vhost_pmd_xstats.py +++ b/tests/TestSuite_vhost_pmd_xstats.py @@ -13,9 +13,9 @@ import re import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput from framework.qemu_kvm import QEMUKvm +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase @@ -28,45 +28,45 @@ class TestVhostPmdXstats(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.unbind_ports = copy.deepcopy(self.dut_ports) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.unbind_ports = copy.deepcopy(self.sut_ports) self.unbind_ports.remove(0) - self.dut.unbind_interfaces_linux(self.unbind_ports) - txport = self.tester.get_local_port(self.dut_ports[0]) - self.txItf = self.tester.get_interface(txport) + self.sut_node.unbind_interfaces_linux(self.unbind_ports) + txport = self.tg_node.get_local_port(self.sut_ports[0]) + self.txItf = self.tg_node.get_interface(txport) self.scapy_num = 0 - self.dmac = self.dut.get_mac_address(self.dut_ports[0]) + self.dmac = self.sut_node.get_mac_address(self.sut_ports[0]) self.virtio1_mac = "52:54:00:00:00:01" self.core_config = "1S/6C/1T" - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.cores_num = len( - [n for n in self.dut.cores if int(n["socket"]) == self.ports_socket] + [n for n in self.sut_node.cores if int(n["socket"]) == self.ports_socket] ) self.verify(self.cores_num >= 6, "There has not enough cores to test this case") - self.core_list = self.dut.get_core_list( + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.core_list_user = self.core_list[0:3] self.core_list_host = self.core_list[3:6] - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user0 = self.dut.new_session(suite="virtio-user0") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + 
self.virtio_user0 = self.sut_node.new_session(suite="virtio-user0") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user0_pmd = PmdOutput(self.sut_node, self.virtio_user0) def set_up(self): """ Run before each test case. Launch vhost sample using default params """ - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -76,9 +76,9 @@ class TestVhostPmdXstats(TestCase): Send a packet to port """ self.scapy_num += 1 - pkt = Packet(pkt_type="TCP", pkt_len=pktsize) - pkt.config_layer("ether", {"dst": dmac}) - pkt.send_pkt(self.tester, tx_port=self.txItf, count=num) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="TCP", pkt_len=pktsize) + scapy_pkt_builder.config_layer("ether", {"dst": dmac}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.txItf, count=num) def send_verify(self, scope, mun): """ @@ -150,7 +150,7 @@ class TestVhostPmdXstats(TestCase): scope = re.search(r"(?<=rx_)\w+(?=_packets)", cat).group(0) pktsize = int(re.search(r"(?<=rx_size_)\d+", cat).group(0)) if pktsize > 1518: - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu %d" % (self.txItf, ETHER_JUMBO_FRAME_MTU), "# " ) types = ["ff:ff:ff:ff:ff:ff", "01:00:00:33:00:01"] @@ -164,7 +164,7 @@ class TestVhostPmdXstats(TestCase): self.scapy_send_packet(int(pktsize + 4), self.dmac, 10000) self.send_verify(scope, 10000) self.clear_port_xstats(scope) - self.tester.send_expect( + self.tg_node.send_expect( "ifconfig %s mtu %d" % (self.txItf, DEFAULT_JUMBO_FRAME_MTU), "# " ) @@ -367,11 +367,11 @@ class TestVhostPmdXstats(TestCase): """ Run after each test 
case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") def tear_down_all(self): """ Run after each test suite. """ - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user0) diff --git a/tests/TestSuite_vhost_user_interrupt.py b/tests/TestSuite_vhost_user_interrupt.py index 8fe55381..671db6cc 100644 --- a/tests/TestSuite_vhost_user_interrupt.py +++ b/tests/TestSuite_vhost_user_interrupt.py @@ -21,17 +21,17 @@ class TestVhostUserInterrupt(TestCase): """ self.queues = 1 - self.cores_num = len([n for n in self.dut.cores if int(n["socket"]) == 0]) + self.cores_num = len([n for n in self.sut_node.cores if int(n["socket"]) == 0]) self.vmac = "00:11:22:33:44:10" - self.pci_info = self.dut.ports_info[0]["pci"] + self.pci_info = self.sut_node.ports_info[0]["pci"] self.prepare_l3fwd_power() - self.app_l3fwd_power_path = self.dut.apps_name["l3fwd-power"] - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.app_l3fwd_power_path = self.sut_node.apps_name["l3fwd-power"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] self.l3fwdpower_name = self.app_l3fwd_power_path.split("/")[-1] - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) # get cbdma device self.cbdma_dev_infos = [] self.dmas_info = None @@ -43,14 +43,14 @@ class TestVhostUserInterrupt(TestCase): """ # Clean the execution ENV self.verify_info = [] - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.vhost = 
self.dut.new_session(suite="vhost-l3fwd") - self.virtio_user = self.dut.new_session(suite="virtio-user") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.vhost = self.sut_node.new_session(suite="vhost-l3fwd") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") def prepare_l3fwd_power(self): - out = self.dut.build_dpdk_apps("examples/l3fwd-power") + out = self.sut_node.build_dpdk_apps("examples/l3fwd-power") self.verify("Error" not in out, "compilation l3fwd-power error") def get_core_list(self): @@ -62,7 +62,7 @@ class TestVhostUserInterrupt(TestCase): self.verify( self.cores_num >= need_num, "There has not enought cores to test this case" ) - core_list = self.dut.get_core_list(self.core_config) + core_list = self.sut_node.get_core_list(self.core_config) self.core_list_virtio = core_list[0 : self.queues + 1] self.core_list_l3fwd = core_list[self.queues + 1 : need_num] @@ -78,11 +78,11 @@ class TestVhostUserInterrupt(TestCase): % (self.vmac, self.queues) ) if cbdma == True: - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_virtio, prefix="virtio", no_pci=True, vdevs=[vdev] ) else: - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_virtio, prefix="virtio", no_pci=True, @@ -101,7 +101,7 @@ class TestVhostUserInterrupt(TestCase): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -130,7 +130,7 @@ class TestVhostUserInterrupt(TestCase): dmas_info += dmas self.dmas_info = dmas_info[:-1] self.device_str = " ".join(used_cbdma) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % 
(self.drivername, self.device_str), "# ", @@ -139,11 +139,11 @@ class TestVhostUserInterrupt(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -152,7 +152,7 @@ class TestVhostUserInterrupt(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -179,14 +179,14 @@ class TestVhostUserInterrupt(TestCase): self.queues, self.dmas_info, ) - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_l3fwd, ports=self.cbdma_dev_infos[0:4], vdevs=[vdev], ) else: vdev = "net_vhost0,iface=vhost-net,queues=%d,client=1" % self.queues - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_l3fwd, no_pci=True, vdevs=[vdev] ) para = " -- -p 0x1 --parse-ptype 1 --config '%s' --interrupt-only" % config_info @@ -237,8 +237,8 @@ class TestVhostUserInterrupt(TestCase): def close_testpmd_and_session(self): self.virtio_user.send_expect("quit", "#", 20) - self.dut.close_session(self.vhost) - self.dut.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost) + self.sut_node.close_session(self.virtio_user) def test_wake_up_split_ring_vhost_user_core_with_l3fwd_power_sample(self): """ @@ -319,9 +319,9 @@ class TestVhostUserInterrupt(TestCase): Run after each test case. 
""" self.close_testpmd_and_session() - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.kill_all() + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.kill_all() self.bind_cbdma_device_to_kernel() def tear_down_all(self): diff --git a/tests/TestSuite_vhost_user_live_migration.py b/tests/TestSuite_vhost_user_live_migration.py index b566374f..218096a0 100644 --- a/tests/TestSuite_vhost_user_live_migration.py +++ b/tests/TestSuite_vhost_user_live_migration.py @@ -8,7 +8,7 @@ import time import framework.utils as utils from framework.config import UserConf -from framework.exception import VirtDutInitException +from framework.exception import VirtSutInitException from framework.settings import CONFIG_ROOT_PATH from framework.test_case import TestCase from framework.virt_common import VM @@ -16,16 +16,16 @@ from framework.virt_common import VM class TestVhostUserLiveMigration(TestCase): def set_up_all(self): - # verify at least two duts - self.verify(len(self.duts) >= 2, "Insufficient duts for live migration!!!") - self.host_dut = self.duts[0] - self.backup_dut = self.duts[1] - - # each dut required one ports - host_dut_ports = self.host_dut.get_ports() - backup_dut_ports = self.backup_dut.get_ports() + # verify at least two SUTs + self.verify(len(self.sut_nodes) >= 2, "Insufficient SUTs for live migration!!!") + self.host_sut = self.sut_nodes[0] + self.backup_sut = self.sut_nodes[1] + + # each SUT required one ports + host_sut_ports = self.host_sut.get_ports() + backup_sut_ports = self.backup_sut.get_ports() self.verify( - len(host_dut_ports) >= 1 and len(backup_dut_ports) >= 1, + len(host_sut_ports) >= 1 and len(backup_sut_ports) >= 1, "Insufficient ports for testing", ) @@ -38,51 +38,51 @@ class TestVhostUserLiveMigration(TestCase): self.config_mount_server() 
self.config_mount_client() - host_dut_port = host_dut_ports[0] - host_dut_ip = self.host_dut.crb["My IP"] - backup_dut_port = backup_dut_ports[0] - self.backup_dut_ip = self.backup_dut.crb["My IP"] + host_sut_port = host_sut_ports[0] + host_sut_ip = self.host_sut.node["My IP"] + backup_sut_port = backup_sut_ports[0] + self.backup_sut_ip = self.backup_sut.node["My IP"] - host_tport = self.tester.get_local_port_bydut(host_dut_port, host_dut_ip) - backup_tport = self.tester.get_local_port_bydut( - backup_dut_port, self.backup_dut_ip + host_tport = self.tg_node.get_local_port_bysut(host_sut_port, host_sut_ip) + backup_tport = self.tg_node.get_local_port_bysut( + backup_sut_port, self.backup_sut_ip ) - self.host_tintf = self.tester.get_interface(host_tport) - self.backup_tintf = self.tester.get_interface(backup_tport) + self.host_tintf = self.tg_node.get_interface(host_tport) + self.backup_tintf = self.tg_node.get_interface(backup_tport) - self.host_pci_info = self.host_dut.ports_info[0]["pci"] - self.backup_pci_info = self.backup_dut.ports_info[0]["pci"] + self.host_pci_info = self.host_sut.ports_info[0]["pci"] + self.backup_pci_info = self.backup_sut.ports_info[0]["pci"] self.virio_mac = "52:54:00:00:00:01" self.queue_number = 1 - self.vm_dut_host = None + self.vm_sut_host = None self.backup_vm = None self.screen_name = "migration" - self.base_dir = self.dut.base_dir.replace("~", "/root") + self.base_dir = self.sut_node.base_dir.replace("~", "/root") host_socket_num = len( - set([int(core["socket"]) for core in self.host_dut.cores]) + set([int(core["socket"]) for core in self.host_sut.cores]) ) backup_socket_num = len( - set([int(core["socket"]) for core in self.backup_dut.cores]) + set([int(core["socket"]) for core in self.backup_sut.cores]) ) self.host_socket_mem = ",".join(["1024"] * host_socket_num) self.backup_socket_mem = ",".join(["1024"] * backup_socket_num) - self.testpmd_path = self.dut.apps_name["test-pmd"] + self.testpmd_path = 
self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.testpmd_path.split("/")[-1] def set_up(self): - self.host_dut.send_expect("rm ./vhost-net*", "# ", 30) - self.backup_dut.send_expect("rm ./vhost-net*", "# ", 30) + self.host_sut.send_expect("rm ./vhost-net*", "# ", 30) + self.backup_sut.send_expect("rm ./vhost-net*", "# ", 30) self.migration_done = False def config_mount_server(self): """ get the mount server config from file /etc/exports - if not config the mount info of host_dut and backup_dut, config it + if not config the mount info of host_sut and backup_sut, config it """ config = "%s %s(rw,sync,no_root_squash)" % ( self.share_path, - self.backup_dut.crb["IP"], + self.backup_sut.node["IP"], ) try: fd = open("/etc/exports", "r+") @@ -104,22 +104,22 @@ class TestVhostUserLiveMigration(TestCase): """ config the mount client to access the mount server """ - out = self.backup_dut.send_expect("ls -d %s" % self.mount_path, "# ") + out = self.backup_sut.send_expect("ls -d %s" % self.mount_path, "# ") if "No such file or directory" in out: - self.backup_dut.send_expect("mkdir -p %s" % self.mount_path, "# ") + self.backup_sut.send_expect("mkdir -p %s" % self.mount_path, "# ") config = "mount -t nfs -o nolock,vers=4 %s:%s %s" % ( - self.host_dut.crb["IP"], + self.host_sut.node["IP"], self.share_path, self.mount_path, ) - self.host_dut.send_expect("service nfs-server restart", "# ") - self.backup_dut.send_expect("service nfs-server restart", "# ") - self.backup_dut.send_expect("umount %s" % self.mount_path, "# ") - self.backup_dut.send_expect(config, "# ") + self.host_sut.send_expect("service nfs-server restart", "# ") + self.backup_sut.send_expect("service nfs-server restart", "# ") + self.backup_sut.send_expect("umount %s" % self.mount_path, "# ") + self.backup_sut.send_expect(config, "# ") time.sleep(2) # verify the mount result - out_host = self.host_dut.send_expect("ls %s" % self.share_path, "#") - out_backup = self.backup_dut.send_expect("ls %s" % 
self.mount_path, "#") + out_host = self.host_sut.send_expect("ls %s" % self.share_path, "#") + out_backup = self.backup_sut.send_expect("ls %s" % self.mount_path, "#") self.verify( out_host == out_backup, "the mount action failed, please confrim it" ) @@ -127,16 +127,16 @@ class TestVhostUserLiveMigration(TestCase): def get_core_list(self): core_number = self.queue_number + 1 core_config = "1S/%dC/1T" % core_number - self.core_list0 = self.duts[0].get_core_list(core_config) - self.core_list1 = self.duts[1].get_core_list(core_config) + self.core_list0 = self.sut_nodes[0].get_core_list(core_config) + self.core_list1 = self.sut_nodes[1].get_core_list(core_config) self.verify( len(self.core_list0) >= core_number and len(self.core_list1) >= core_number, - "There have not enough cores to start testpmd on duts", + "There have not enough cores to start testpmd on SUTs", ) - def launch_testpmd_as_vhost_on_both_dut(self): + def launch_testpmd_as_vhost_on_both_sut(self): """ - start testpmd as vhost user on host_dut and backup_dut + start testpmd as vhost user on host_sut and backup_sut """ self.get_core_list() testcmd = self.testpmd_path + " " @@ -149,13 +149,13 @@ class TestVhostUserLiveMigration(TestCase): self.queue_number, self.queue_number, ) - eal_params_first = self.dut.create_eal_parameters( + eal_params_first = self.sut_node.create_eal_parameters( cores=self.core_list0, prefix="vhost", ports=[self.host_pci_info], vdevs=vdev, ) - eal_params_secondary = self.dut.create_eal_parameters( + eal_params_secondary = self.sut_node.create_eal_parameters( cores=self.core_list1, prefix="vhost", ports=[self.backup_pci_info], @@ -163,22 +163,22 @@ class TestVhostUserLiveMigration(TestCase): ) host_cmd_line = testcmd + eal_params_first + para backup_cmd_line = testcmd + eal_params_secondary + para - self.host_dut.send_expect(host_cmd_line, "testpmd> ", 30) - self.backup_dut.send_expect(backup_cmd_line, "testpmd> ", 30) + self.host_sut.send_expect(host_cmd_line, "testpmd> ", 30) + 
self.backup_sut.send_expect(backup_cmd_line, "testpmd> ", 30) - def start_testpmd_with_fwd_mode_on_both_dut(self, fwd_mode="io"): - self.host_dut.send_expect("set fwd %s" % fwd_mode, "testpmd> ", 30) - self.host_dut.send_expect("start", "testpmd> ", 30) - self.backup_dut.send_expect("set fwd %s" % fwd_mode, "testpmd> ", 30) - self.backup_dut.send_expect("start", "testpmd> ", 30) + def start_testpmd_with_fwd_mode_on_both_sut(self, fwd_mode="io"): + self.host_sut.send_expect("set fwd %s" % fwd_mode, "testpmd> ", 30) + self.host_sut.send_expect("start", "testpmd> ", 30) + self.backup_sut.send_expect("set fwd %s" % fwd_mode, "testpmd> ", 30) + self.backup_sut.send_expect("start", "testpmd> ", 30) - def setup_vm_env_on_both_dut(self, driver="default", packed=False): + def setup_vm_env_on_both_sut(self, driver="default", packed=False): """ Create testing environment on Host and Backup """ try: # set up host virtual machine - self.host_vm = VM(self.duts[0], "host", "%s" % self.suite_name) + self.host_vm = VM(self.sut_nodes[0], "host", "%s" % self.suite_name) vhost_params = {} vhost_params["driver"] = "vhost-user" vhost_params["opt_path"] = self.base_dir + "/vhost-net" @@ -195,13 +195,13 @@ class TestVhostUserLiveMigration(TestCase): self.host_vm.set_vm_device(**vhost_params) self.logger.info("Start virtual machine on host") - self.vm_dut_host = self.host_vm.start() + self.vm_sut_host = self.host_vm.start() - if self.vm_dut_host is None: + if self.vm_sut_host is None: raise Exception("Set up host VM ENV failed!") self.logger.info("Start virtual machine on backup host") # set up backup virtual machine - self.backup_vm = VM(self.duts[1], "backup", "vhost_user_live_migration") + self.backup_vm = VM(self.sut_nodes[1], "backup", "vhost_user_live_migration") vhost_params = {} vhost_params["driver"] = "vhost-user" vhost_params["opt_path"] = self.base_dir + "/vhost-net" @@ -215,7 +215,7 @@ class TestVhostUserLiveMigration(TestCase): self.backup_vm.start() except Exception as ex: - 
if ex is VirtDutInitException: + if ex is VirtSutInitException: self.host_vm.stop() self.host_vm = None # no session created yet, call internal stop function @@ -228,9 +228,9 @@ class TestVhostUserLiveMigration(TestCase): def destroy_vm_env(self): self.logger.info("Stop virtual machine on host") try: - if self.vm_dut_host is not None: + if self.vm_sut_host is not None: if not self.migration_done: - self.vm_dut_host.send_expect("pkill screen", "# ") + self.vm_sut_host.send_expect("pkill screen", "# ") self.host_vm.stop() self.host_vm = None except Exception as e: @@ -240,72 +240,72 @@ class TestVhostUserLiveMigration(TestCase): try: if self.backup_vm is not None: if self.migration_done: - self.vm_dut_backup.send_expect("pkill screen", "# ") - self.vm_dut_backup.kill_all() + self.vm_sut_backup.send_expect("pkill screen", "# ") + self.vm_sut_backup.kill_all() self.backup_vm.stop() self.backup_vm = None except Exception as e: self.logger.error("stop the qemu backup failed as %s" % str(e)) # after vm stopped, stop vhost testpmd - for crb in self.duts: - crb.send_expect("quit", "# ") - crb.kill_all() + for node in self.sut_nodes: + node.send_expect("quit", "# ") + node.kill_all() - def bind_nic_driver_of_vm(self, crb, driver=""): + def bind_nic_driver_of_vm(self, node, driver=""): # modprobe vfio driver - ports = crb.get_ports() + ports = node.get_ports() if driver == "vfio-pci": - crb.send_expect("modprobe vfio-pci", "# ") + node.send_expect("modprobe vfio-pci", "# ") for port in ports: - netdev = crb.ports_info[port]["port"] + netdev = node.ports_info[port]["port"] driver_now = netdev.get_nic_driver() if driver_now != driver: netdev.bind_driver(driver) def send_pkts_in_bg(self): """ - send packet from tester + send packet from TG """ sendp_fmt = "sendp([Ether(dst='%s')/IP(src='%s', dst='%s')/UDP(sport=11,dport=12)/('x'*18)], iface='%s', loop=1, inter=0.5)" sendp_cmd = sendp_fmt % (self.virio_mac, "1.1.1.1", "2.2.2.2", self.host_tintf) - self.send_pks_session = 
self.tester.create_session("scapy1") + self.send_pks_session = self.tg_node.create_session("scapy1") self.send_pks_session.send_expect("scapy", ">>>") self.send_pks_session.send_command(sendp_cmd) if self.host_tintf != self.backup_tintf: sendp_cmd = sendp_fmt % {"DMAC": self.virio_mac, "INTF": self.backup_tintf} - self.send_pks_session2 = self.tester.create_session("scapy2") + self.send_pks_session2 = self.tg_node.create_session("scapy2") self.send_pks_session2.send_expect("scapy", ">>>") self.send_pks_session2.send_command(sendp_cmd) - def stop_send_pkts_on_tester(self): - self.tester.send_expect("pkill scapy", "# ") + def stop_send_pkts_on_tg(self): + self.tg_node.send_expect("pkill scapy", "# ") if getattr(self, "scapy1", None): - self.tester.destroy_session(self.send_pks_session) + self.tg_node.destroy_session(self.send_pks_session) if getattr(self, "scapy2", None): - self.tester.destroy_session(self.send_pks_session2) + self.tg_node.destroy_session(self.send_pks_session2) - def start_testpmd_on_vm(self, vm_dut): - vm_dut.send_expect("export TERM=screen", "# ") - vm_dut.send_expect("screen -S %s" % self.screen_name, "# ", 120) + def start_testpmd_on_vm(self, vm_sut): + vm_sut.send_expect("export TERM=screen", "# ") + vm_sut.send_expect("screen -S %s" % self.screen_name, "# ", 120) vm_testpmd = self.testpmd_path + " -c 0x3 -n 4 -- -i" - vm_dut.send_expect(vm_testpmd, "testpmd> ", 120) - vm_dut.send_expect("set fwd rxonly", "testpmd> ", 30) - vm_dut.send_expect("set promisc all off", "testpmd> ", 30) - vm_dut.send_expect("start", "testpmd> ", 30) - vm_dut.send_command("^a") - vm_dut.send_command("^d") + vm_sut.send_expect(vm_testpmd, "testpmd> ", 120) + vm_sut.send_expect("set fwd rxonly", "testpmd> ", 30) + vm_sut.send_expect("set promisc all off", "testpmd> ", 30) + vm_sut.send_expect("start", "testpmd> ", 30) + vm_sut.send_command("^a") + vm_sut.send_command("^d") - def verify_dpdk(self, vm_dut): - vm_dut.send_expect("export TERM=screen", "# ") - 
vm_dut.send_command("screen -r %s" % self.screen_name) + def verify_dpdk(self, vm_sut): + vm_sut.send_expect("export TERM=screen", "# ") + vm_sut.send_command("screen -r %s" % self.screen_name) stats_pat = re.compile("RX-packets: (\d+)") - vm_dut.send_expect("clear port stats all", "testpmd> ") + vm_sut.send_expect("clear port stats all", "testpmd> ") time.sleep(5) - out = vm_dut.send_expect("show port stats 0", "testpmd> ") + out = vm_sut.send_expect("show port stats 0", "testpmd> ") print(out) m = stats_pat.search(out) if m: @@ -314,46 +314,46 @@ class TestVhostUserLiveMigration(TestCase): num_received = 0 self.verify(num_received > 0, "Not receive packets as expected!!!") - vm_dut.send_command("^a") - vm_dut.send_command("^d") + vm_sut.send_command("^a") + vm_sut.send_command("^d") - def verify_kernel(self, vm_dut): + def verify_kernel(self, vm_sut): """ Function to verify packets received by virtIO """ - vm_dut.send_expect("export TERM=screen", "# ") - vm_dut.send_command("screen -r %s" % self.screen_name) + vm_sut.send_expect("export TERM=screen", "# ") + vm_sut.send_command("screen -r %s" % self.screen_name) # clean the output info before verify time.sleep(5) - out = vm_dut.get_session_output(timeout=1) + out = vm_sut.get_session_output(timeout=1) print(out) num = out.count("UDP") self.verify(num > 0, "Not receive packets as expected!!!") - vm_dut.send_command("^a") - vm_dut.send_command("^d") + vm_sut.send_command("^a") + vm_sut.send_command("^d") - def start_tcpdump_on_vm(self, vm_dut): - vm_dut.send_expect("export TERM=screen", "# ") - vm_dut.send_expect("screen -S %s" % self.screen_name, "# ", 120) + def start_tcpdump_on_vm(self, vm_sut): + vm_sut.send_expect("export TERM=screen", "# ") + vm_sut.send_expect("screen -S %s" % self.screen_name, "# ", 120) # get host interface - vm_intf = vm_dut.ports_info[0]["port"].get_interface_name() + vm_intf = vm_sut.ports_info[0]["port"].get_interface_name() # start tcpdump the interface - vm_dut.send_expect("ifconfig 
%s up" % vm_intf, "# ") + vm_sut.send_expect("ifconfig %s up" % vm_intf, "# ") direct_pat = re.compile(r"(\s+)\[ (\S+) in\|out\|inout \]") - vm_dut.send_expect("tcpdump -h", "# ") - out = vm_dut.get_session_output(timeout=1) + vm_sut.send_expect("tcpdump -h", "# ") + out = vm_sut.get_session_output(timeout=1) m = direct_pat.search(out) if m: direct_param = "-" + m.group(2)[1] + " in" else: direct_param = "" - vm_dut.send_expect( + vm_sut.send_expect( "tcpdump -i %s %s -v" % (vm_intf, direct_param), "listening on", 120 ) time.sleep(2) - vm_dut.send_command("^a") - vm_dut.send_command("^d") + vm_sut.send_command("^a") + vm_sut.send_command("^d") def send_and_verify(self, verify_fun, multi_queue=False): """ @@ -362,18 +362,18 @@ class TestVhostUserLiveMigration(TestCase): verify vm_host can recevied packets during migration verify vm_backup can recevied packets after migration """ - # send packets from tester + # send packets from TG self.send_pkts_in_bg() # verify host virtio-net work fine - verify_fun(self.vm_dut_host) + verify_fun(self.vm_sut_host) self.logger.info("Migrate host VM to backup host") # start live migration - self.host_vm.start_migration(self.backup_dut_ip, self.backup_vm.migrate_port) + self.host_vm.start_migration(self.backup_sut_ip, self.backup_vm.migrate_port) if multi_queue is True: - vm_intf = self.vm_dut_host.ports_info[0]["port"].get_interface_name() - out = self.vm_dut_host.send_expect( + vm_intf = self.vm_sut_host.ports_info[0]["port"].get_interface_name() + out = self.vm_sut_host.send_expect( "ethtool -L %s combined 4" % vm_intf, "# " ) self.verify( @@ -389,10 +389,10 @@ class TestVhostUserLiveMigration(TestCase): self.logger.info("Migration process done, then go to backup VM") # connected backup VM - self.vm_dut_backup = self.backup_vm.migrated_start(set_target=False) + self.vm_sut_backup = self.backup_vm.migrated_start(set_target=False) # make sure still can receive packets - verify_fun(self.vm_dut_backup) + verify_fun(self.vm_sut_backup) 
def test_migrate_with_split_ring_virtio_net(self): """ @@ -400,42 +400,42 @@ class TestVhostUserLiveMigration(TestCase): Verify before/in/after migration, device with kernel driver can receive packets """ self.queue_number = 1 - self.launch_testpmd_as_vhost_on_both_dut() - self.start_testpmd_with_fwd_mode_on_both_dut() - self.setup_vm_env_on_both_dut() + self.launch_testpmd_as_vhost_on_both_sut() + self.start_testpmd_with_fwd_mode_on_both_sut() + self.setup_vm_env_on_both_sut() # bind virtio-net back to virtio-pci - self.bind_nic_driver_of_vm(self.vm_dut_host, driver="") + self.bind_nic_driver_of_vm(self.vm_sut_host, driver="") # start screen and tcpdump on vm - self.start_tcpdump_on_vm(self.vm_dut_host) + self.start_tcpdump_on_vm(self.vm_sut_host) self.send_and_verify(self.verify_kernel) def test_adjust_split_ring_virtio_net_queue_numbers_while_migreting_with_virtio_net( self, ): self.queue_number = 4 - self.launch_testpmd_as_vhost_on_both_dut() - self.start_testpmd_with_fwd_mode_on_both_dut() - self.setup_vm_env_on_both_dut() + self.launch_testpmd_as_vhost_on_both_sut() + self.start_testpmd_with_fwd_mode_on_both_sut() + self.setup_vm_env_on_both_sut() # bind virtio-net back to virtio-pci - self.bind_nic_driver_of_vm(self.vm_dut_host, driver="") - self.start_tcpdump_on_vm(self.vm_dut_host) + self.bind_nic_driver_of_vm(self.vm_sut_host, driver="") + self.start_tcpdump_on_vm(self.vm_sut_host) self.send_and_verify(self.verify_kernel, True) def test_migrate_with_split_ring_virtio_pmd(self): self.queue_number = 1 - self.launch_testpmd_as_vhost_on_both_dut() - self.start_testpmd_with_fwd_mode_on_both_dut() - self.setup_vm_env_on_both_dut() - self.start_testpmd_on_vm(self.vm_dut_host) + self.launch_testpmd_as_vhost_on_both_sut() + self.start_testpmd_with_fwd_mode_on_both_sut() + self.setup_vm_env_on_both_sut() + self.start_testpmd_on_vm(self.vm_sut_host) self.send_and_verify(self.verify_dpdk) def test_migrate_with_packed_ring_virtio_pmd(self): self.queue_number = 1 - 
self.launch_testpmd_as_vhost_on_both_dut() - self.start_testpmd_with_fwd_mode_on_both_dut() - self.setup_vm_env_on_both_dut(packed=True) - self.start_testpmd_on_vm(self.vm_dut_host) + self.launch_testpmd_as_vhost_on_both_sut() + self.start_testpmd_with_fwd_mode_on_both_sut() + self.setup_vm_env_on_both_sut(packed=True) + self.start_testpmd_on_vm(self.vm_sut_host) self.send_and_verify(self.verify_dpdk) def test_migrate_with_packed_ring_virtio_net(self): @@ -444,31 +444,31 @@ class TestVhostUserLiveMigration(TestCase): Verify before/in/after migration, device with kernel driver can receive packets """ self.queue_number = 1 - self.launch_testpmd_as_vhost_on_both_dut() - self.start_testpmd_with_fwd_mode_on_both_dut() - self.setup_vm_env_on_both_dut(packed=True) + self.launch_testpmd_as_vhost_on_both_sut() + self.start_testpmd_with_fwd_mode_on_both_sut() + self.setup_vm_env_on_both_sut(packed=True) # bind virtio-net back to virtio-pci - self.bind_nic_driver_of_vm(self.vm_dut_host, driver="") + self.bind_nic_driver_of_vm(self.vm_sut_host, driver="") # start screen and tcpdump on vm - self.start_tcpdump_on_vm(self.vm_dut_host) + self.start_tcpdump_on_vm(self.vm_sut_host) self.send_and_verify(self.verify_kernel) def test_adjust_packed_ring_virtio_net_queue_numbers_while_migreting_with_virtio_net( self, ): self.queue_number = 4 - self.launch_testpmd_as_vhost_on_both_dut() - self.start_testpmd_with_fwd_mode_on_both_dut() - self.setup_vm_env_on_both_dut(packed=True) + self.launch_testpmd_as_vhost_on_both_sut() + self.start_testpmd_with_fwd_mode_on_both_sut() + self.setup_vm_env_on_both_sut(packed=True) # bind virtio-net back to virtio-pci - self.bind_nic_driver_of_vm(self.vm_dut_host, driver="") - self.start_tcpdump_on_vm(self.vm_dut_host) + self.bind_nic_driver_of_vm(self.vm_sut_host, driver="") + self.start_tcpdump_on_vm(self.vm_sut_host) self.send_and_verify(self.verify_kernel, True) def tear_down(self): self.destroy_vm_env() - self.duts[0].send_expect("killall -s INT 
qemu-system-x86_64", "#") - self.duts[1].send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_nodes[0].send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_nodes[1].send_expect("killall -s INT qemu-system-x86_64", "#") pass def tear_down_all(self): diff --git a/tests/TestSuite_vhost_virtio_pmd_interrupt.py b/tests/TestSuite_vhost_virtio_pmd_interrupt.py index 9beecdc9..04409c32 100644 --- a/tests/TestSuite_vhost_virtio_pmd_interrupt.py +++ b/tests/TestSuite_vhost_virtio_pmd_interrupt.py @@ -11,9 +11,9 @@ import re import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM @@ -25,27 +25,27 @@ class TestVhostVirtioPmdInterrupt(TestCase): self.fix_ip = False self.nb_cores = 4 self.queues = 4 - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.cores_num = len( - [n for n in self.dut.cores if int(n["socket"]) == self.ports_socket] + [n for n in self.sut_node.cores if int(n["socket"]) == self.ports_socket] ) - self.pci_info = self.dut.ports_info[0]["pci"] - self.tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.pci_info = self.sut_node.ports_info[0]["pci"] + self.tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.logger.info( "Please comfirm the kernel of vm greater than 4.8.0 and enable vfio-noiommu in kernel" ) self.out_path 
= "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.app_l3fwd_power_path = self.dut.apps_name["l3fwd-power"] - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.app_l3fwd_power_path = self.sut_node.apps_name["l3fwd-power"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] self.l3fwdpower_name = self.app_l3fwd_power_path.split("/")[-1] self.device_str = None @@ -56,11 +56,11 @@ class TestVhostVirtioPmdInterrupt(TestCase): """ # Clean the execution ENV self.verify_info = [] - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.vm_dut = None + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.vm_sut = None def get_core_list(self): """ @@ -71,17 +71,17 @@ class TestVhostVirtioPmdInterrupt(TestCase): self.cores_num >= (self.nb_cores + 1), "There has not enough cores to running case: %s" % self.running_case, ) - self.core_list = self.dut.get_core_list("all", socket=self.ports_socket) + self.core_list = 
self.sut_node.get_core_list("all", socket=self.ports_socket) def prepare_vm_env(self): """ rebuild l3fwd-power in vm and set the virtio-net driver """ - out = self.vm_dut.build_dpdk_apps("examples/l3fwd-power") + out = self.vm_sut.build_dpdk_apps("examples/l3fwd-power") self.verify("Error" not in out, "compilation l3fwd-power error") - self.vm_dut.send_expect("modprobe vfio enable_unsafe_noiommu_mode=1", "#") - self.vm_dut.send_expect("modprobe vfio-pci", "#") - self.vm_dut.ports_info[0]["port"].bind_driver("vfio-pci") + self.vm_sut.send_expect("modprobe vfio enable_unsafe_noiommu_mode=1", "#") + self.vm_sut.send_expect("modprobe vfio-pci", "#") + self.vm_sut.ports_info[0]["port"].bind_driver("vfio-pci") def start_testpmd_on_vhost(self, dmas=None): """ @@ -97,14 +97,14 @@ class TestVhostVirtioPmdInterrupt(TestCase): "'net_vhost0,iface=%s/vhost-net,queues=%d,dmas=[%s]'" % (self.base_dir, self.queues, dmas) ] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, ports=device_str, vdevs=vdev ) else: vdev = [ "net_vhost0,iface=%s/vhost-net,queues=%d" % (self.base_dir, self.queues) ] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, ports=[self.pci_info], vdevs=vdev ) para = " -- -i --nb-cores=%d --rxq=%d --txq=%d --rss-ip" % ( @@ -121,11 +121,11 @@ class TestVhostVirtioPmdInterrupt(TestCase): launch l3fwd-power with a virtual vhost device """ self.verify( - len(self.vm_dut.cores) >= self.nb_cores, + len(self.vm_sut.cores) >= self.nb_cores, "The vm done not has enought cores to use, please config it", ) core_config = "1S/%dC/1T" % self.nb_cores - core_list_l3fwd = self.vm_dut.get_core_list(core_config) + core_list_l3fwd = self.vm_sut.get_core_list(core_config) core_mask_l3fwd = utils.create_mask(core_list_l3fwd) res = True @@ -144,10 +144,10 @@ class TestVhostVirtioPmdInterrupt(TestCase): + "--config '%s' --no-numa --parse-ptype 
--interrupt-only" ) command_line_client = command_client % (core_mask_l3fwd, config_info) - self.vm_dut.get_session_output(timeout=2) - self.vm_dut.send_expect(command_line_client, "POWER", 40) + self.vm_sut.get_session_output(timeout=2) + self.vm_sut.send_expect(command_line_client, "POWER", 40) time.sleep(10) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() if "Error" in out and "Error opening" not in out: self.logger.error("Launch l3fwd-power sample error") res = False @@ -166,7 +166,7 @@ class TestVhostVirtioPmdInterrupt(TestCase): """ start qemus """ - self.vm = VM(self.dut, "vm0", self.suite_name) + self.vm = VM(self.sut_node, "vm0", self.suite_name) self.vm.load_config() vm_params = {} vm_params["driver"] = "vhost-user" @@ -188,8 +188,8 @@ class TestVhostVirtioPmdInterrupt(TestCase): try: # Due to we have change the params info before, # so need to start vm with load_config=False - self.vm_dut = self.vm.start(load_config=False) - if self.vm_dut is None: + self.vm_sut = self.vm.start(load_config=False) + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -197,8 +197,8 @@ class TestVhostVirtioPmdInterrupt(TestCase): def check_related_cores_status_in_l3fwd(self, out_result, status, fix_ip): """ check the vcpu status - when tester send fix_ip packet, the cores in l3fwd only one can change the status - when tester send not fix_ip packets, all the cores in l3fwd will change the status + when TG send fix_ip packet, the cores in l3fwd only one can change the status + when TG send not fix_ip packets, all the cores in l3fwd will change the status """ change = 0 for i in range(len(self.verify_info)): @@ -238,24 +238,24 @@ class TestVhostVirtioPmdInterrupt(TestCase): def send_packets(self): tgen_input = [] if self.fix_ip is True: - pkt = Packet(pkt_type="UDP") + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") else: - pkt = 
Packet(pkt_type="IP_RAW") - pkt.config_layer("ether", {"dst": "%s" % self.dst_mac}) - pkt.save_pcapfile(self.tester, "%s/interrupt.pcap" % self.out_path) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IP_RAW") + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % self.dst_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/interrupt.pcap" % self.out_path) tgen_input.append( (self.tx_port, self.tx_port, "%s/interrupt.pcap" % self.out_path) ) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() vm_config = self.set_fields() if self.fix_ip is True: vm_config = None streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, vm_config, self.tester.pktgen + tgen_input, 100, vm_config, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 5, "duration": 20} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) @@ -268,7 +268,7 @@ class TestVhostVirtioPmdInterrupt(TestCase): # packets will distribute to all queues self.fix_ip = False self.send_packets() - out = self.vm_dut.get_session_output(timeout=5) + out = self.vm_sut.get_session_output(timeout=5) self.check_related_cores_status_in_l3fwd(out, "waked up", fix_ip=False) self.check_related_cores_status_in_l3fwd(out, "sleeps", fix_ip=False) @@ -276,7 +276,7 @@ class TestVhostVirtioPmdInterrupt(TestCase): # packets will distribute to 1 queue self.fix_ip = True self.send_packets() - out = self.vm_dut.get_session_output(timeout=5) + out = self.vm_sut.get_session_output(timeout=5) self.check_related_cores_status_in_l3fwd(out, "waked up", fix_ip=True) self.check_related_cores_status_in_l3fwd(out, "sleeps", fix_ip=True) @@ -284,7 +284,7 @@ class TestVhostVirtioPmdInterrupt(TestCase): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) cbdma_dev_infos = 
re.findall("\s*(0000:\S+:\d+.\d+)", out) @@ -301,7 +301,7 @@ class TestVhostVirtioPmdInterrupt(TestCase): dmas_info += dmas self.dmas_info = dmas_info[:-1] self.device_str = " ".join(used_cbdma) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -310,11 +310,11 @@ class TestVhostVirtioPmdInterrupt(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -325,16 +325,16 @@ class TestVhostVirtioPmdInterrupt(TestCase): """ close all vms """ - if self.vm_dut is not None: - vm_dut2 = self.vm_dut.create_session(name="vm_dut2") - vm_dut2.send_expect("killall %s" % self.l3fwdpower_name, "# ", 10) - # self.vm_dut.send_expect("killall l3fwd-power", "# ", 60, alt_session=True) - self.vm_dut.send_expect("cp /tmp/main.c ./examples/l3fwd-power/", "#", 15) - out = self.vm_dut.build_dpdk_apps("examples/l3fwd-power") + if self.vm_sut is not None: + vm_sut2 = self.vm_sut.create_session(name="vm_sut2") + vm_sut2.send_expect("killall %s" % self.l3fwdpower_name, "# ", 10) + # self.vm_sut.send_expect("killall l3fwd-power", "# ", 60, alt_session=True) + self.vm_sut.send_expect("cp /tmp/main.c ./examples/l3fwd-power/", "#", 15) + out = self.vm_sut.build_dpdk_apps("examples/l3fwd-power") self.vm.stop() - self.dut.close_session(vm_dut2) + self.sut_node.close_session(vm_sut2) self.vhost_user.send_expect("quit", "#", 10) - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) def test_perf_virtio_pmd_interrupt_with_4queues(self): """ @@ -431,9 +431,9 @@ class 
TestVhostVirtioPmdInterrupt(TestCase): Run after each test case. """ self.stop_all_apps() - self.dut.kill_all() - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.kill_all() + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") self.bind_cbdma_device_to_kernel() def tear_down_all(self): diff --git a/tests/TestSuite_vhost_virtio_pmd_interrupt_cbdma.py b/tests/TestSuite_vhost_virtio_pmd_interrupt_cbdma.py index 779010ba..b85639eb 100644 --- a/tests/TestSuite_vhost_virtio_pmd_interrupt_cbdma.py +++ b/tests/TestSuite_vhost_virtio_pmd_interrupt_cbdma.py @@ -11,10 +11,10 @@ import re import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM @@ -26,33 +26,33 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): self.fix_ip = False self.nb_cores = 4 self.queues = 4 - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.cores_num = len( - [n for n in self.dut.cores if int(n["socket"]) == self.ports_socket] + [n for n in self.sut_node.cores if int(n["socket"]) == self.ports_socket] ) - self.core_list = self.dut.get_core_list("all", socket=self.ports_socket) + self.core_list = self.sut_node.get_core_list("all", socket=self.ports_socket) self.core_list_vhost = 
self.core_list[0:17] - self.tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) self.logger.info( "Please comfirm the kernel of vm greater than 4.8.0 and enable vfio-noiommu in kernel" ) self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.app_l3fwd_power_path = self.dut.apps_name["l3fwd-power"] - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.app_l3fwd_power_path = self.sut_node.apps_name["l3fwd-power"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] self.l3fwdpower_name = self.app_l3fwd_power_path.split("/")[-1] - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.vhost_pmd = PmdOutput(self.dut, self.vhost_user) - self.vm_dut = None + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.vhost_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.vm_sut = None def set_up(self): """ @@ -60,30 +60,30 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): """ # Clean the execution ENV self.verify_info = [] - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + 
self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") def prepare_vm_env(self): """ rebuild l3fwd-power in vm and set the virtio-net driver """ - out = self.vm_dut.build_dpdk_apps("examples/l3fwd-power") + out = self.vm_sut.build_dpdk_apps("examples/l3fwd-power") self.verify("Error" not in out, "compilation l3fwd-power error") - self.vm_dut.send_expect("modprobe vfio enable_unsafe_noiommu_mode=1", "#") - self.vm_dut.send_expect("modprobe vfio-pci", "#") - self.vm_dut.ports_info[0]["port"].bind_driver("vfio-pci") + self.vm_sut.send_expect("modprobe vfio enable_unsafe_noiommu_mode=1", "#") + self.vm_sut.send_expect("modprobe vfio-pci", "#") + self.vm_sut.ports_info[0]["port"].bind_driver("vfio-pci") def launch_l3fwd_power_in_vm(self): """ launch l3fwd-power with a virtual vhost device """ self.verify( - len(self.vm_dut.cores) >= self.nb_cores, + len(self.vm_sut.cores) >= self.nb_cores, "The vm done not has enought cores to use, please config it", ) core_config = "1S/%dC/1T" % self.nb_cores - core_list_l3fwd = self.vm_dut.get_core_list(core_config) + core_list_l3fwd = self.vm_sut.get_core_list(core_config) core_mask_l3fwd = utils.create_mask(core_list_l3fwd) res = True @@ -102,10 +102,10 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): + "--config '%s' --no-numa --parse-ptype --interrupt-only" ) command_line_client = command_client % (core_mask_l3fwd, config_info) - self.vm_dut.get_session_output(timeout=2) - self.vm_dut.send_expect(command_line_client, "POWER", 40) + self.vm_sut.get_session_output(timeout=2) + self.vm_sut.send_expect(command_line_client, "POWER", 40) time.sleep(10) - out = self.vm_dut.get_session_output() + out = self.vm_sut.get_session_output() if "Error" in out and "Error opening" not in out: self.logger.error("Launch l3fwd-power sample error") res = False @@ -124,7 +124,7 @@ class 
TestVhostVirtioPmdInterruptCbdma(TestCase): """ start qemus """ - self.vm = VM(self.dut, "vm0", "vhost_sample") + self.vm = VM(self.sut_node, "vm0", "vhost_sample") self.vm.load_config() vm_params = {} vm_params["driver"] = "vhost-user" @@ -146,8 +146,8 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): try: # Due to we have change the params info before, # so need to start vm with load_config=False - self.vm_dut = self.vm.start(load_config=False) - if self.vm_dut is None: + self.vm_sut = self.vm.start(load_config=False) + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -155,8 +155,8 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): def check_related_cores_status_in_l3fwd(self, out_result, status, fix_ip): """ check the vcpu status - when tester send fix_ip packet, the cores in l3fwd only one can change the status - when tester send not fix_ip packets, all the cores in l3fwd will change the status + when TG send fix_ip packet, the cores in l3fwd only one can change the status + when TG send not fix_ip packets, all the cores in l3fwd will change the status """ change = 0 for i in range(len(self.verify_info)): @@ -196,24 +196,24 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): def send_packets(self): tgen_input = [] if self.fix_ip is True: - pkt = Packet(pkt_type="UDP") + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") else: - pkt = Packet(pkt_type="IP_RAW") - pkt.config_layer("ether", {"dst": "%s" % self.dst_mac}) - pkt.save_pcapfile(self.tester, "%s/interrupt.pcap" % self.out_path) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IP_RAW") + scapy_pkt_builder.config_layer("ether", {"dst": "%s" % self.dst_mac}) + scapy_pkt_builder.save_pcapfile(self.tg_node, "%s/interrupt.pcap" % self.out_path) tgen_input.append( (self.tx_port, self.tx_port, "%s/interrupt.pcap" % self.out_path) ) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() 
vm_config = self.set_fields() if self.fix_ip is True: vm_config = None streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, vm_config, self.tester.pktgen + tgen_input, 100, vm_config, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 5, "duration": 20} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) @@ -226,7 +226,7 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): # packets will distribute to all queues self.fix_ip = False self.send_packets() - out = self.vm_dut.get_session_output(timeout=5) + out = self.vm_sut.get_session_output(timeout=5) self.check_related_cores_status_in_l3fwd(out, "waked up", fix_ip=False) self.check_related_cores_status_in_l3fwd(out, "sleeps", fix_ip=False) @@ -234,7 +234,7 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): # packets will distribute to 1 queue self.fix_ip = True self.send_packets() - out = self.vm_dut.get_session_output(timeout=5) + out = self.vm_sut.get_session_output(timeout=5) self.check_related_cores_status_in_l3fwd(out, "waked up", fix_ip=True) self.check_related_cores_status_in_l3fwd(out, "sleeps", fix_ip=True) @@ -245,7 +245,7 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -269,7 +269,7 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -277,11 +277,11 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): ) def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - 
self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -291,14 +291,14 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): """ close all vms """ - if self.vm_dut is not None: - vm_dut2 = self.vm_dut.create_session(name="vm_dut2") - vm_dut2.send_expect("killall %s" % self.l3fwdpower_name, "# ", 10) - # self.vm_dut.send_expect("killall l3fwd-power", "# ", 60, alt_session=True) - self.vm_dut.send_expect("cp /tmp/main.c ./examples/l3fwd-power/", "#", 15) - out = self.vm_dut.build_dpdk_apps("examples/l3fwd-power") + if self.vm_sut is not None: + vm_sut2 = self.vm_sut.create_session(name="vm_sut2") + vm_sut2.send_expect("killall %s" % self.l3fwdpower_name, "# ", 10) + # self.vm_sut.send_expect("killall l3fwd-power", "# ", 60, alt_session=True) + self.vm_sut.send_expect("cp /tmp/main.c ./examples/l3fwd-power/", "#", 15) + out = self.vm_sut.build_dpdk_apps("examples/l3fwd-power") self.vm.stop() - self.dut.close_session(vm_dut2) + self.sut_node.close_session(vm_sut2) self.vhost_pmd.quit() def test_perf_virtio_interrupt_with_16_queues_and_cbdma_enabled(self): @@ -330,7 +330,7 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): ) vhost_eal_param = "--vdev 'eth_vhost0,iface=vhost-net,queues=16,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15]'" ports = self.cbdma_list - ports.append(self.dut.ports_info[0]["pci"]) + ports.append(self.sut_node.ports_info[0]["pci"]) self.vhost_pmd.start_testpmd( cores=self.core_list_vhost, ports=ports, @@ -365,7 +365,7 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): "--vdev 'net_vhost0,iface=vhost-net,queues=4,dmas=[txq0;txq1;txq2;txq3]'" ) ports = self.cbdma_list - ports.append(self.dut.ports_info[0]["pci"]) + 
ports.append(self.sut_node.ports_info[0]["pci"]) self.vhost_pmd.start_testpmd( cores=self.core_list_vhost, ports=ports, @@ -410,7 +410,7 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): ) vhost_eal_param = "--vdev 'eth_vhost0,iface=vhost-net,queues=16,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15]'" ports = self.cbdma_list - ports.append(self.dut.ports_info[0]["pci"]) + ports.append(self.sut_node.ports_info[0]["pci"]) self.vhost_pmd.start_testpmd( cores=self.core_list_vhost, ports=ports, @@ -431,13 +431,13 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase): Run after each test case. """ self.stop_all_apps() - self.dut.kill_all() - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.kill_all() + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") self.bind_cbdma_device_to_kernel() def tear_down_all(self): """ Run after each test suite. """ - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) diff --git a/tests/TestSuite_vhost_virtio_user_interrupt.py b/tests/TestSuite_vhost_virtio_user_interrupt.py index fd4d9fff..ead8c9cd 100644 --- a/tests/TestSuite_vhost_virtio_user_interrupt.py +++ b/tests/TestSuite_vhost_virtio_user_interrupt.py @@ -20,28 +20,28 @@ class TestVirtioUserInterrupt(TestCase): run at the start of each test suite. 
""" self.core_config = "1S/4C/1T" - self.dut_ports = self.dut.get_ports() - self.cores_num = len([n for n in self.dut.cores if int(n["socket"]) == 0]) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.cores_num = len([n for n in self.sut_node.cores if int(n["socket"]) == 0]) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.verify( self.cores_num >= 4, "There has not enought cores to test this case" ) - self.core_list = self.dut.get_core_list(self.core_config) + self.core_list = self.sut_node.get_core_list(self.core_config) self.core_list_vhost = self.core_list[0:2] self.core_list_l3fwd = self.core_list[2:4] self.core_mask_vhost = utils.create_mask(self.core_list_vhost) self.core_mask_l3fwd = utils.create_mask(self.core_list_l3fwd) self.core_mask_virtio = self.core_mask_l3fwd - self.pci_info = self.dut.ports_info[0]["pci"] - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.pci_info = self.sut_node.ports_info[0]["pci"] + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.cbdma_dev_infos = [] self.dmas_info = None self.device_str = None self.prepare_l3fwd_power() - self.tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.tx_interface = self.tester.get_interface(self.tx_port) - self.app_l3fwd_power_path = self.dut.apps_name["l3fwd-power"] - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tx_interface = self.tg_node.get_interface(self.tx_port) + self.app_l3fwd_power_path = self.sut_node.apps_name["l3fwd-power"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] self.l3fwdpower_name = self.app_l3fwd_power_path.split("/")[-1] @@ -49,25 +49,25 @@ class TestVirtioUserInterrupt(TestCase): """ run before each test case. 
""" - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("rm -rf vhost-net*", "#") - self.l3fwd = self.dut.new_session(suite="l3fwd") - self.vhost = self.dut.new_session(suite="vhost") - self.virtio = self.dut.new_session(suite="virito") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("rm -rf vhost-net*", "#") + self.l3fwd = self.sut_node.new_session(suite="l3fwd") + self.vhost = self.sut_node.new_session(suite="vhost") + self.virtio = self.sut_node.new_session(suite="virito") def close_all_session(self): - self.dut.close_session(self.vhost) - self.dut.close_session(self.virtio) - self.dut.close_session(self.l3fwd) + self.sut_node.close_session(self.vhost) + self.sut_node.close_session(self.virtio) + self.sut_node.close_session(self.l3fwd) def prepare_l3fwd_power(self): - out = self.dut.build_dpdk_apps("./examples/l3fwd-power") + out = self.sut_node.build_dpdk_apps("./examples/l3fwd-power") self.verify("Error" not in out, "compilation l3fwd-power error") @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -79,7 +79,7 @@ class TestVirtioUserInterrupt(TestCase): vdev = "virtio_user0,path=%s,cq=1" % path else: vdev = "virtio_user0,path=%s,cq=1,packed_vq=1" % path - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_l3fwd, prefix="l3fwd-pwd", no_pci=True, vdevs=[vdev] ) if self.check_2M_env: @@ -111,25 +111,25 @@ class TestVirtioUserInterrupt(TestCase): if len(pci) == 0: if dmas: vdev = ["net_vhost0,iface=vhost-net,queues=1,dmas=[%s]" % dmas] - eal_params = self.dut.create_eal_parameters( + eal_params = 
self.sut_node.create_eal_parameters( cores=self.core_list_vhost, ports=allow_pci, vdevs=vdev ) else: - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_vhost, ports=allow_pci, vdevs=vdev ) else: if dmas: vdev = ["net_vhost0,iface=vhost-net,queues=1,client=0,dmas=[%s]" % dmas] para = " -- -i" - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_vhost, ports=allow_pci, prefix="vhost", vdevs=vdev, ) else: - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_vhost, prefix="vhost", no_pci=True, vdevs=vdev ) cmd_vhost_user = testcmd + eal_params + para @@ -146,7 +146,7 @@ class TestVirtioUserInterrupt(TestCase): vdev = "net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net" else: vdev = "net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,packed_vq=1" - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_l3fwd, prefix="virtio", no_pci=True, vdevs=[vdev] ) para = " -- -i --txd=512 --rxd=128 --tx-offloads=0x00" @@ -178,7 +178,7 @@ class TestVirtioUserInterrupt(TestCase): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -207,7 +207,7 @@ class TestVirtioUserInterrupt(TestCase): dmas_info += dmas self.dmas_info = dmas_info[:-1] self.device_str = " ".join(self.used_cbdma) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -216,11 +216,11 @@ class TestVirtioUserInterrupt(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") 
+ self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -239,14 +239,14 @@ class TestVirtioUserInterrupt(TestCase): time.sleep(3) self.check_interrupt_log(status="waked up") # stop ping, check the status of interrupt core - self.dut.send_expect("killall -s INT ping", "#") + self.sut_node.send_expect("killall -s INT ping", "#") time.sleep(2) self.check_interrupt_log(status="sleeps") # restart ping, check the status of interrupt core self.virtio.send_command("ping -I tap0 1.1.1.1 > aa &", 20) time.sleep(3) self.check_interrupt_log(status="waked up") - self.dut.send_expect("killall -s INT ping", "#") + self.sut_node.send_expect("killall -s INT ping", "#") def test_split_ring_virtio_user_interrupt_with_vhost_user_as_backend(self): """ @@ -256,13 +256,13 @@ class TestVirtioUserInterrupt(TestCase): self.launch_l3fwd(path="./vhost-net") # double check the status of interrupt core for i in range(2): - self.tester.scapy_append( + self.tg_node.scapy_append( 'pk=[Ether(dst="52:54:00:00:00:01")/IP()/("X"*64)]' ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp(pk, iface="%s", count=100)' % self.tx_interface ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(3) self.check_interrupt_log(status="waked up") @@ -285,13 +285,13 @@ class TestVirtioUserInterrupt(TestCase): self.launch_l3fwd(path="./vhost-net", packed=True) # double check the status of interrupt core for i in range(2): - self.tester.scapy_append( + self.tg_node.scapy_append( 'pk=[Ether(dst="52:54:00:00:00:01")/IP()/("X"*64)]' ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp(pk, iface="%s", count=100)' % self.tx_interface ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(3) self.check_interrupt_log(status="waked up") @@ -307,14 +307,14 @@ class 
TestVirtioUserInterrupt(TestCase): time.sleep(3) self.check_interrupt_log(status="waked up") # stop ping, check the status of interrupt core - self.dut.send_expect("killall -s INT ping", "#") + self.sut_node.send_expect("killall -s INT ping", "#") time.sleep(2) self.check_interrupt_log(status="sleeps") # restart ping, check the status of interrupt core self.virtio.send_command("ping -I tap0 1.1.1.1 > aa &", 20) time.sleep(3) self.check_interrupt_log(status="waked up") - self.dut.send_expect("killall -s INT ping", "#") + self.sut_node.send_expect("killall -s INT ping", "#") def test_lsc_event_between_vhost_user_and_virtio_user_with_packed_ring(self): """ @@ -338,8 +338,8 @@ class TestVirtioUserInterrupt(TestCase): self.check_virtio_side_link_status("up") self.vhost.send_expect("quit", "#", 20) self.check_virtio_side_link_status("down") - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.close_all_session() def test_split_ring_virtio_user_interrupt_test_with_vhost_user_as_backend_and_cbdma_enabled( @@ -353,17 +353,17 @@ class TestVirtioUserInterrupt(TestCase): self.launch_l3fwd(path="./vhost-net") # double check the status of interrupt core for i in range(2): - self.tester.scapy_append( + self.tg_node.scapy_append( 'pk=[Ether(dst="52:54:00:00:00:01")/IP()/("X"*64)]' ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp(pk, iface="%s", count=100)' % self.tx_interface ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(3) self.check_interrupt_log(status="waked up") - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("killall -s INT 
%s" % self.testpmd_name, "#") self.close_all_session() def test_lsc_event_between_vhost_user_and_virtio_user_with_packed_ring_and_cbdma_enabled( @@ -378,8 +378,8 @@ class TestVirtioUserInterrupt(TestCase): self.check_virtio_side_link_status("up") self.vhost.send_expect("quit", "#", 20) self.check_virtio_side_link_status("down") - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.close_all_session() def test_packed_ring_virtio_user_interrupt_test_with_vhost_user_as_backend_and_cbdma_enabled( @@ -393,25 +393,25 @@ class TestVirtioUserInterrupt(TestCase): self.launch_l3fwd(path="./vhost-net", packed=True) # double check the status of interrupt core for i in range(2): - self.tester.scapy_append( + self.tg_node.scapy_append( 'pk=[Ether(dst="52:54:00:00:00:01")/IP()/("X"*64)]' ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp(pk, iface="%s", count=100)' % self.tx_interface ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(3) self.check_interrupt_log(status="waked up") - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.close_all_session() def tear_down(self): """ run after each test case. 
""" - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.bind_cbdma_device_to_kernel() self.close_all_session() diff --git a/tests/TestSuite_vhost_virtio_user_interrupt_cbdma.py b/tests/TestSuite_vhost_virtio_user_interrupt_cbdma.py index a89ae5d7..e13430c3 100644 --- a/tests/TestSuite_vhost_virtio_user_interrupt_cbdma.py +++ b/tests/TestSuite_vhost_virtio_user_interrupt_cbdma.py @@ -21,14 +21,14 @@ class TestVirtioUserInterruptCbdma(TestCase): run at the start of each test suite. """ self.core_config = "1S/4C/1T" - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores_num = len([n for n in self.dut.cores if int(n["socket"]) == 0]) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores_num = len([n for n in self.sut_node.cores if int(n["socket"]) == 0]) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.verify( self.cores_num >= 4, "There has not enought cores to test this case" ) - self.core_list = self.dut.get_core_list( + self.core_list = self.sut_node.get_core_list( self.core_config, socket=self.ports_socket ) self.core_list_vhost = self.core_list[0:2] @@ -36,43 +36,43 @@ class TestVirtioUserInterruptCbdma(TestCase): self.core_mask_vhost = utils.create_mask(self.core_list_vhost) self.core_mask_l3fwd = utils.create_mask(self.core_list_l3fwd) self.core_mask_virtio = self.core_mask_l3fwd - self.pci_info = self.dut.ports_info[0]["pci"] + self.pci_info = self.sut_node.ports_info[0]["pci"] self.cbdma_dev_infos = [] self.dmas_info = None self.device_str = None self.prepare_l3fwd_power() - self.tx_port = 
self.tester.get_local_port(self.dut_ports[0]) - self.tx_interface = self.tester.get_interface(self.tx_port) - self.app_l3fwd_power_path = self.dut.apps_name["l3fwd-power"] - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tx_interface = self.tg_node.get_interface(self.tx_port) + self.app_l3fwd_power_path = self.sut_node.apps_name["l3fwd-power"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] self.l3fwdpower_name = self.app_l3fwd_power_path.split("/")[-1] - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.vhost_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user = self.dut.new_session(suite="virtio-user") - self.virtio_pmd = PmdOutput(self.dut, self.virtio_user) - self.l3fwd = self.dut.new_session(suite="l3fwd") + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.vhost_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user = self.sut_node.new_session(suite="virtio-user") + self.virtio_pmd = PmdOutput(self.sut_node, self.virtio_user) + self.l3fwd = self.sut_node.new_session(suite="l3fwd") def set_up(self): """ run before each test case. 
""" - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("rm -rf vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("rm -rf vhost-net*", "#") def close_all_session(self): - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user) - self.dut.close_session(self.l3fwd) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user) + self.sut_node.close_session(self.l3fwd) def prepare_l3fwd_power(self): - out = self.dut.build_dpdk_apps("./examples/l3fwd-power") + out = self.sut_node.build_dpdk_apps("./examples/l3fwd-power") self.verify("Error" not in out, "compilation l3fwd-power error") @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -84,7 +84,7 @@ class TestVirtioUserInterruptCbdma(TestCase): vdev = "virtio_user0,path=%s,cq=1" % path else: vdev = "virtio_user0,path=%s,cq=1,packed_vq=1" % path - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list_l3fwd, prefix="l3fwd-pwd", no_pci=True, vdevs=[vdev] ) if self.check_2M_env: @@ -129,7 +129,7 @@ class TestVirtioUserInterruptCbdma(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -153,7 +153,7 @@ class TestVirtioUserInterruptCbdma(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force 
--bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -161,11 +161,11 @@ class TestVirtioUserInterruptCbdma(TestCase): ) def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -226,7 +226,7 @@ class TestVirtioUserInterruptCbdma(TestCase): vhost_param = "--rxq=1 --txq=1 --lcore-dma={}".format(lcore_dma) vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net,queues=1,dmas=[txq0]'" ports = self.cbdma_list - ports.append(self.dut.ports_info[0]["pci"]) + ports.append(self.sut_node.ports_info[0]["pci"]) self.logger.info(ports) self.vhost_pmd.start_testpmd( cores=self.core_list_vhost, @@ -239,13 +239,13 @@ class TestVirtioUserInterruptCbdma(TestCase): self.launch_l3fwd(path="./vhost-net") # double check the status of interrupt core for _ in range(2): - self.tester.scapy_append( + self.tg_node.scapy_append( 'pk=[Ether(dst="52:54:00:00:00:01")/IP()/("X"*64)]' ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp(pk, iface="%s", count=100)' % self.tx_interface ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(3) self.check_interrupt_log(status="waked up") @@ -304,7 +304,7 @@ class TestVirtioUserInterruptCbdma(TestCase): vhost_param = "--rxq=1 --txq=1 --lcore-dma={}".format(lcore_dma) vhost_eal_param = "--vdev 'net_vhost0,iface=vhost-net,queues=1,dmas=[txq0]'" ports = self.cbdma_list - ports.append(self.dut.ports_info[0]["pci"]) + ports.append(self.sut_node.ports_info[0]["pci"]) self.vhost_pmd.start_testpmd( cores=self.core_list_vhost, ports=ports, @@ -316,13 +316,13 @@ class TestVirtioUserInterruptCbdma(TestCase): self.launch_l3fwd(path="./vhost-net", packed=True) # double check the status of 
interrupt core for _ in range(2): - self.tester.scapy_append( + self.tg_node.scapy_append( 'pk=[Ether(dst="52:54:00:00:00:01")/IP()/("X"*64)]' ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp(pk, iface="%s", count=100)' % self.tx_interface ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() time.sleep(3) self.check_interrupt_log(status="waked up") @@ -330,8 +330,8 @@ class TestVirtioUserInterruptCbdma(TestCase): """ run after each test case. """ - self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall %s" % self.l3fwdpower_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.bind_cbdma_device_to_kernel() def tear_down_all(self): diff --git a/tests/TestSuite_virtio_event_idx_interrupt.py b/tests/TestSuite_virtio_event_idx_interrupt.py index 39c80874..494a6b88 100644 --- a/tests/TestSuite_virtio_event_idx_interrupt.py +++ b/tests/TestSuite_virtio_event_idx_interrupt.py @@ -12,8 +12,8 @@ import re import time import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM @@ -24,22 +24,22 @@ class TestVirtioIdxInterrupt(TestCase): """ self.queues = 1 self.nb_cores = 1 - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.cores_num = len( - [n for n in self.dut.cores if int(n["socket"]) == self.ports_socket] + [n for n in self.sut_node.cores if int(n["socket"]) == self.ports_socket] ) - self.dst_mac = 
self.dut.get_mac_address(self.dut_ports[0]) - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.pf_pci = self.dut.ports_info[0]["pci"] + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.pf_pci = self.sut_node.ports_info[0]["pci"] self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] self.device_str = None @@ -49,10 +49,10 @@ class TestVirtioIdxInterrupt(TestCase): """ # Clean the execution ENV self.flag = None - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.vhost = self.dut.new_session(suite="vhost") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.vhost = self.sut_node.new_session(suite="vhost") def get_core_mask(self): self.core_config = "1S/%dC/1T" % (self.nb_cores + 1) @@ -60,13 +60,13 @@ class TestVirtioIdxInterrupt(TestCase): self.cores_num >= (self.nb_cores + 1), "There has not enough cores to test this case %s" % self.running_case, ) - self.core_list = self.dut.get_core_list(self.core_config) + self.core_list = 
self.sut_node.get_core_list(self.core_config) def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) cbdma_dev_infos = re.findall("\s*(0000:\S+:\d+.\d+)", out) @@ -82,7 +82,7 @@ class TestVirtioIdxInterrupt(TestCase): dmas_info += dmas self.dmas_info = dmas_info[:-1] self.device_str = " ".join(used_cbdma) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -91,11 +91,11 @@ class TestVirtioIdxInterrupt(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -122,14 +122,14 @@ class TestVirtioIdxInterrupt(TestCase): "net_vhost,iface=%s/vhost-net,queues=%d,dmas=[%s]" % (self.base_dir, self.queues, dmas) ] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, prefix="vhost", ports=device_str, vdevs=vdev ) else: vdev = [ "net_vhost,iface=%s/vhost-net,queues=%d " % (self.base_dir, self.queues) ] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.core_list, prefix="vhost", ports=[self.pf_pci], vdevs=vdev ) para = " -- -i --nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d" % ( @@ -145,7 +145,7 @@ class TestVirtioIdxInterrupt(TestCase): """ start qemus """ - self.vm = VM(self.dut, "vm0", "vhost_sample") + self.vm = VM(self.sut_node, "vm0", "vhost_sample") vm_params = {} vm_params["driver"] = "vhost-user" if mode: @@ 
-164,8 +164,8 @@ class TestVirtioIdxInterrupt(TestCase): vm_params["opt_settings"] = opt_args self.vm.set_vm_device(**vm_params) try: - self.vm_dut = self.vm.start(set_target=set_target, bind_dev=bind_dev) - if self.vm_dut is None: + self.vm_sut = self.vm.start(set_target=set_target, bind_dev=bind_dev) + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -175,13 +175,13 @@ class TestVirtioIdxInterrupt(TestCase): config ip for virtio net set net for multi queues enable """ - self.vm_intf = self.vm_dut.ports_info[0]["intf"] - self.vm_dut.send_expect("ifconfig %s down" % self.vm_intf, "#") - out = self.vm_dut.send_expect("ifconfig", "#") + self.vm_intf = self.vm_sut.ports_info[0]["intf"] + self.vm_sut.send_expect("ifconfig %s down" % self.vm_intf, "#") + out = self.vm_sut.send_expect("ifconfig", "#") self.verify(self.vm_intf not in out, "the virtio-pci down failed") - self.vm_dut.send_expect("ifconfig %s up" % self.vm_intf, "#") + self.vm_sut.send_expect("ifconfig %s up" % self.vm_intf, "#") if self.queues > 1: - self.vm_dut.send_expect( + self.vm_sut.send_expect( "ethtool -L %s combined %d" % (self.vm_intf, self.queues), "#", 20 ) @@ -190,25 +190,25 @@ class TestVirtioIdxInterrupt(TestCase): start send packets """ tgen_input = [] - port = self.tester.get_local_port(self.dut_ports[0]) - self.tester.scapy_append( + port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_node.scapy_append( 'a=[Ether(dst="%s")/IP(src="0.240.74.101",proto=255)/UDP()/("X"*18)]' % (self.dst_mac) ) - self.tester.scapy_append('wrpcap("%s/interrupt.pcap", a)' % self.out_path) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s/interrupt.pcap", a)' % self.out_path) + self.tg_node.scapy_execute() tgen_input.append((port, port, "%s/interrupt.pcap" % self.out_path)) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() fields_config = { "ip": { "dst": 
{"action": "random"}, }, } streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 1, fields_config, self.tester.pktgen + tgen_input, 1, fields_config, self.tg_node.perf_tg ) traffic_opt = {"delay": 5, "duration": delay, "rate": 1} - _, self.flag = self.tester.pktgen.measure_throughput( + _, self.flag = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) @@ -221,7 +221,7 @@ class TestVirtioIdxInterrupt(TestCase): _thread.start_new_thread(self.start_to_send_packets, (reload_times * 20,)) # wait the ixia begin to send packets time.sleep(10) - self.vm_pci = self.vm_dut.ports_info[0]["pci"] + self.vm_pci = self.vm_sut.ports_info[0]["pci"] # reload virtio device to check the virtio-net can receive packets for i in range(reload_times + 1): if time.time() - start_time > reload_times * 30: @@ -231,22 +231,22 @@ class TestVirtioIdxInterrupt(TestCase): self.logger.info("The virtio device has reload %d times" % i) return False self.logger.info("The virtio net device reload %d times" % i) - self.vm_dut.send_expect( + self.vm_sut.send_expect( "tcpdump -n -vv -i %s" % self.vm_intf, "tcpdump", 30 ) time.sleep(5) - out = self.vm_dut.get_session_output(timeout=3) - self.vm_dut.send_expect("^c", "#", 30) + out = self.vm_sut.get_session_output(timeout=3) + self.vm_sut.send_expect("^c", "#", 30) self.verify( "ip-proto-255" in out, "The virtio device can not receive packets after reload %d times" % i, ) time.sleep(2) # reload virtio device - self.vm_dut.restore_interfaces() + self.vm_sut.restore_interfaces() time.sleep(3) - self.vm_dut.send_expect("ifconfig %s down" % self.vm_intf, "#") - self.vm_dut.send_expect("ifconfig %s up" % self.vm_intf, "#") + self.vm_sut.send_expect("ifconfig %s down" % self.vm_intf, "#") + self.vm_sut.send_expect("ifconfig %s up" % self.vm_intf, "#") # wait ixia thread exit self.logger.info("wait the thread of ixia to exit") while 1: @@ -407,9 +407,9 @@ class TestVirtioIdxInterrupt(TestCase): """ Run after each 
test case. """ - self.dut.close_session(self.vhost) - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.close_session(self.vhost) + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") self.bind_cbdma_device_to_kernel() def tear_down_all(self): diff --git a/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py b/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py index 4a65c381..17d72b00 100644 --- a/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py +++ b/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py @@ -11,9 +11,9 @@ import _thread import re import time -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM @@ -24,27 +24,27 @@ class TestVirtioIdxInterruptCbdma(TestCase): """ self.queues = 1 self.nb_cores = 1 - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.core_list = self.dut.get_core_list("all", socket=self.ports_socket) + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.core_list = self.sut_node.get_core_list("all", socket=self.ports_socket) self.core_list_vhost = self.core_list[0:17] self.cores_num = len( - [n for n in self.dut.cores if int(n["socket"]) == self.ports_socket] + [n for n in self.sut_node.cores if int(n["socket"]) == self.ports_socket] ) - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.pf_pci = self.dut.ports_info[0]["pci"] + self.dst_mac = 
self.sut_node.get_mac_address(self.sut_ports[0]) + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.pf_pci = self.sut_node.ports_info[0]["pci"] self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.vhost_pmd = PmdOutput(self.dut, self.vhost_user) + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.vhost_pmd = PmdOutput(self.sut_node, self.vhost_user) def set_up(self): """ @@ -52,10 +52,10 @@ class TestVirtioIdxInterruptCbdma(TestCase): """ # Clean the execution ENV self.flag = None - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.vhost = self.dut.new_session(suite="vhost") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.vhost = self.sut_node.new_session(suite="vhost") def get_core_mask(self): self.core_config = "1S/%dC/1T" % (self.nb_cores + 1) @@ -63,7 +63,7 @@ class TestVirtioIdxInterruptCbdma(TestCase): self.cores_num >= (self.nb_cores + 1), "There has not enough cores to test this case %s" % self.running_case, ) - self.core_list = 
self.dut.get_core_list(self.core_config) + self.core_list = self.sut_node.get_core_list(self.core_config) def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False): """ @@ -72,7 +72,7 @@ class TestVirtioIdxInterruptCbdma(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -96,7 +96,7 @@ class TestVirtioIdxInterruptCbdma(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -104,11 +104,11 @@ class TestVirtioIdxInterruptCbdma(TestCase): ) def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -118,7 +118,7 @@ class TestVirtioIdxInterruptCbdma(TestCase): """ start qemus """ - self.vm = VM(self.dut, "vm0", "vhost_sample") + self.vm = VM(self.sut_node, "vm0", "vhost_sample") vm_params = {} vm_params["driver"] = "vhost-user" if mode: @@ -137,8 +137,8 @@ class TestVirtioIdxInterruptCbdma(TestCase): vm_params["opt_settings"] = opt_args self.vm.set_vm_device(**vm_params) try: - self.vm_dut = self.vm.start(set_target=set_target, bind_dev=bind_dev) - if self.vm_dut is None: + self.vm_sut = self.vm.start(set_target=set_target, bind_dev=bind_dev) + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -148,13 +148,13 @@ class 
TestVirtioIdxInterruptCbdma(TestCase): config ip for virtio net set net for multi queues enable """ - self.vm_intf = self.vm_dut.ports_info[0]["intf"] - self.vm_dut.send_expect("ifconfig %s down" % self.vm_intf, "#") - out = self.vm_dut.send_expect("ifconfig", "#") + self.vm_intf = self.vm_sut.ports_info[0]["intf"] + self.vm_sut.send_expect("ifconfig %s down" % self.vm_intf, "#") + out = self.vm_sut.send_expect("ifconfig", "#") self.verify(self.vm_intf not in out, "the virtio-pci down failed") - self.vm_dut.send_expect("ifconfig %s up" % self.vm_intf, "#") + self.vm_sut.send_expect("ifconfig %s up" % self.vm_intf, "#") if self.queues > 1: - self.vm_dut.send_expect( + self.vm_sut.send_expect( "ethtool -L %s combined %d" % (self.vm_intf, self.queues), "#", 20 ) @@ -163,25 +163,25 @@ class TestVirtioIdxInterruptCbdma(TestCase): start send packets """ tgen_input = [] - port = self.tester.get_local_port(self.dut_ports[0]) - self.tester.scapy_append( + port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_node.scapy_append( 'a=[Ether(dst="%s")/IP(src="0.240.74.101",proto=255)/UDP()/("X"*18)]' % (self.dst_mac) ) - self.tester.scapy_append('wrpcap("%s/interrupt.pcap", a)' % self.out_path) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s/interrupt.pcap", a)' % self.out_path) + self.tg_node.scapy_execute() tgen_input.append((port, port, "%s/interrupt.pcap" % self.out_path)) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() fields_config = { "ip": { "dst": {"action": "random"}, }, } streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 1, fields_config, self.tester.pktgen + tgen_input, 1, fields_config, self.tg_node.perf_tg ) traffic_opt = {"delay": 5, "duration": delay, "rate": 1} - _, self.flag = self.tester.pktgen.measure_throughput( + _, self.flag = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) @@ -194,7 +194,7 @@ class TestVirtioIdxInterruptCbdma(TestCase): 
_thread.start_new_thread(self.start_to_send_packets, (reload_times * 20,)) # wait the ixia begin to send packets time.sleep(10) - self.vm_pci = self.vm_dut.ports_info[0]["pci"] + self.vm_pci = self.vm_sut.ports_info[0]["pci"] # reload virtio device to check the virtio-net can receive packets for i in range(reload_times + 1): if time.time() - start_time > reload_times * 30: @@ -204,22 +204,22 @@ class TestVirtioIdxInterruptCbdma(TestCase): self.logger.info("The virtio device has reload %d times" % i) return False self.logger.info("The virtio net device reload %d times" % i) - self.vm_dut.send_expect( + self.vm_sut.send_expect( "tcpdump -n -vv -i %s" % self.vm_intf, "tcpdump", 30 ) time.sleep(5) - out = self.vm_dut.get_session_output(timeout=3) - self.vm_dut.send_expect("^c", "#", 30) + out = self.vm_sut.get_session_output(timeout=3) + self.vm_sut.send_expect("^c", "#", 30) self.verify( "ip-proto-255" in out, "The virtio device can not receive packets after reload %d times" % i, ) time.sleep(2) # reload virtio device - self.vm_dut.restore_interfaces() + self.vm_sut.restore_interfaces() time.sleep(3) - self.vm_dut.send_expect("ifconfig %s down" % self.vm_intf, "#") - self.vm_dut.send_expect("ifconfig %s up" % self.vm_intf, "#") + self.vm_sut.send_expect("ifconfig %s down" % self.vm_intf, "#") + self.vm_sut.send_expect("ifconfig %s up" % self.vm_intf, "#") # wait ixia thread exit self.logger.info("wait the thread of ixia to exit") while 1: @@ -267,7 +267,7 @@ class TestVirtioIdxInterruptCbdma(TestCase): ) vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=1,dmas=[txq0]'" ports = self.cbdma_list - ports.append(self.dut.ports_info[0]["pci"]) + ports.append(self.sut_node.ports_info[0]["pci"]) self.vhost_pmd.start_testpmd( cores=self.core_list_vhost, ports=ports, @@ -314,7 +314,7 @@ class TestVirtioIdxInterruptCbdma(TestCase): ) vhost_eal_param = "--vdev 
'net_vhost,iface=vhost-net,queues=16,client=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15]'" ports = self.cbdma_list - ports.append(self.dut.ports_info[0]["pci"]) + ports.append(self.sut_node.ports_info[0]["pci"]) self.vhost_pmd.start_testpmd( cores=self.core_list_vhost, ports=ports, @@ -341,7 +341,7 @@ class TestVirtioIdxInterruptCbdma(TestCase): ) vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=1,dmas=[txq0]'" ports = self.cbdma_list - ports.append(self.dut.ports_info[0]["pci"]) + ports.append(self.sut_node.ports_info[0]["pci"]) self.vhost_pmd.start_testpmd( cores=self.core_list_vhost, ports=ports, @@ -388,7 +388,7 @@ class TestVirtioIdxInterruptCbdma(TestCase): ) vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=16,client=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15]'" ports = self.cbdma_list - ports.append(self.dut.ports_info[0]["pci"]) + ports.append(self.sut_node.ports_info[0]["pci"]) self.vhost_pmd.start_testpmd( cores=self.core_list_vhost, ports=ports, @@ -408,12 +408,12 @@ class TestVirtioIdxInterruptCbdma(TestCase): """ Run after each test case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") self.bind_cbdma_device_to_kernel() def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.close_session(self.vhost) + self.sut_node.close_session(self.vhost) diff --git a/tests/TestSuite_virtio_ipsec_cryptodev_func.py b/tests/TestSuite_virtio_ipsec_cryptodev_func.py index 0a7b1e88..d7c0ebb4 100644 --- a/tests/TestSuite_virtio_ipsec_cryptodev_func.py +++ b/tests/TestSuite_virtio_ipsec_cryptodev_func.py @@ -13,15 +13,15 @@ import time import framework.utils as utils import tests.cryptodev_common as cc -from framework.packet import Packet from framework.qemu_kvm import QEMUKvm +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase class VirtioCryptodevIpsecTest(TestCase): def set_up_all(self): - self.sample_app = self.dut.apps_name["vhost_crypto"] - self.user_app = self.dut.apps_name["ipsec-secgw"] + self.sample_app = self.sut_node.apps_name["vhost_crypto"] + self.user_app = self.sut_node.apps_name["ipsec-secgw"] self._default_ipsec_gw_opts = { "p": "0x3", "config": None, @@ -29,22 +29,22 @@ class VirtioCryptodevIpsecTest(TestCase): "u": "0x1", } - self.vm0, self.vm0_dut = None, None - self.vm1, self.vm1_dut = None, None - self.dut.skip_setup = True + self.vm0, self.vm0_sut = None, None + self.vm1, self.vm1_sut = None, None + self.sut_node.skip_setup = True - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 4, "Insufficient ports for test") - self.cores = self.dut.get_core_list("1S/5C/1T") - self.mem_channel = self.dut.get_memory_channels() - self.port_mask = utils.create_mask([self.dut_ports[0]]) - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 4, "Insufficient ports for test") + self.cores = self.sut_node.get_core_list("1S/5C/1T") + self.mem_channel = self.sut_node.get_memory_channels() + self.port_mask = utils.create_mask([self.sut_ports[0]]) + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) - self.tx_port = 
self.tester.get_local_port(self.dut_ports[0]) - self.rx_port = self.tester.get_local_port(self.dut_ports[-1]) + self.tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.rx_port = self.tg_node.get_local_port(self.sut_ports[-1]) - self.tx_interface = self.tester.get_interface(self.tx_port) - self.rx_interface = self.tester.get_interface(self.rx_port) + self.tx_interface = self.tg_node.get_interface(self.tx_port) + self.rx_interface = self.tg_node.get_interface(self.rx_port) self.logger.info("tx interface = " + self.tx_interface) self.logger.info("rx interface = " + self.rx_interface) @@ -52,19 +52,19 @@ class VirtioCryptodevIpsecTest(TestCase): self.sriov_port = self.bind_vfio_pci() cc.bind_qat_device(self, self.drivername) - self.dut.build_dpdk_apps("./examples/vhost_crypto") + self.sut_node.build_dpdk_apps("./examples/vhost_crypto") self.bind_vfio_pci() self.launch_vhost_switch() - self.vm0, self.vm0_dut = self.launch_virtio_dut("vm0") - self.vm1, self.vm1_dut = self.launch_virtio_dut("vm1") + self.vm0, self.vm0_sut = self.launch_virtio_sut("vm0") + self.vm1, self.vm1_sut = self.launch_virtio_sut("vm1") def set_up(self): pass - def dut_execut_cmd(self, cmdline, ex="#", timout=30): - return self.dut.send_expect(cmdline, ex, timout) + def sut_execut_cmd(self, cmdline, ex="#", timout=30): + return self.sut_node.send_expect(cmdline, ex, timout) def get_vhost_eal(self): default_eal_opts = { @@ -93,7 +93,7 @@ class VirtioCryptodevIpsecTest(TestCase): return opt_str - def cfg_prepare(self, dut): + def cfg_prepare(self, sut): """ ipsec configuration file """ @@ -121,15 +121,15 @@ class VirtioCryptodevIpsecTest(TestCase): "rt ipv4 dst 192.168.105.0/24 port 1\n" ) - self.set_cfg(dut, "ep0.cfg", ep0) - self.set_cfg(dut, "ep1.cfg", ep1) + self.set_cfg(sut, "ep0.cfg", ep0) + self.set_cfg(sut, "ep1.cfg", ep1) - def set_cfg(self, dut, filename, cfg): + def set_cfg(self, sut, filename, cfg): with open(filename, "w") as f: f.write(cfg) - 
dut.session.copy_file_to(filename, dut.base_dir) - dut.session.copy_file_to(filename, dut.base_dir) + sut.session.copy_file_to(filename, sut.base_dir) + sut.session.copy_file_to(filename, sut.base_dir) def launch_vhost_switch(self): eal_opt_str = self.get_vhost_eal() @@ -147,37 +147,37 @@ class VirtioCryptodevIpsecTest(TestCase): eal_opt_str, "--config %s --socket-file %s" % (config, socket_file), ) - self.dut_execut_cmd("rm -r /tmp/*") - out = self.dut_execut_cmd(self.vhost_switch_cmd, "socket created", 30) + self.sut_execut_cmd("rm -r /tmp/*") + out = self.sut_execut_cmd(self.vhost_switch_cmd, "socket created", 30) self.logger.info(out) def bind_vfio_pci(self): self.vf_assign_method = "vfio-pci" - self.dut.setup_modules(None, self.vf_assign_method, None) + self.sut_node.setup_modules(None, self.vf_assign_method, None) sriov_ports = [] - for port in self.dut.ports_info: + for port in self.sut_node.ports_info: port["port"].bind_driver("vfio-pci") sriov_ports.append(port["port"]) return sriov_ports - def set_virtio_pci(self, dut): - out = dut.send_expect("lspci -d:1054|awk '{{print $1}}'", "# ", 10) + def set_virtio_pci(self, sut): + out = sut.send_expect("lspci -d:1054|awk '{{print $1}}'", "# ", 10) virtio_list = out.replace("\r", "\n").replace("\n\n", "\n").split("\n") - dut.send_expect("modprobe uio_pci_generic", "#", 10) + sut.send_expect("modprobe uio_pci_generic", "#", 10) for line in virtio_list: cmd = "echo 0000:{} > /sys/bus/pci/devices/0000\:{}/driver/unbind".format( line, line.replace(":", "\:") ) - dut.send_expect(cmd, "# ", 10) - dut.send_expect( + sut.send_expect(cmd, "# ", 10) + sut.send_expect( 'echo "1af4 1054" > /sys/bus/pci/drivers/uio_pci_generic/new_id', "# ", 10 ) return virtio_list - def launch_virtio_dut(self, vm_name): - vm = QEMUKvm(self.dut, vm_name, "virtio_ipsec_cryptodev_func") + def launch_virtio_sut(self, vm_name): + vm = QEMUKvm(self.sut_node, vm_name, "virtio_ipsec_cryptodev_func") if vm_name == "vm0": vf0 = {"opt_host": 
self.sriov_port[0].pci} vf1 = {"opt_host": self.sriov_port[1].pci} @@ -187,69 +187,69 @@ class VirtioCryptodevIpsecTest(TestCase): vm.set_vm_device(driver=self.vf_assign_method, **vf0) vm.set_vm_device(driver=self.vf_assign_method, **vf1) - skip_setup = self.dut.skip_setup + skip_setup = self.sut_node.skip_setup try: - self.dut.skip_setup = True - vm_dut = vm.start() - if vm_dut is None: + self.sut_node.skip_setup = True + vm_sut = vm.start() + if vm_sut is None: print(("{} start failed".format(vm_name))) except Exception as err: raise err - self.dut.skip_setup = skip_setup - vm_dut.restore_interfaces() + self.sut_node.skip_setup = skip_setup + vm_sut.restore_interfaces() - vm_dut.build_dpdk_apps("./examples/ipsec-secgw") + vm_sut.build_dpdk_apps("./examples/ipsec-secgw") - vm_dut.setup_modules(self.target, self.drivername, None) - vm_dut.bind_interfaces_linux(self.drivername) - vm.virtio_list = self.set_virtio_pci(vm_dut) + vm_sut.setup_modules(self.target, self.drivername, None) + vm_sut.bind_interfaces_linux(self.drivername) + vm.virtio_list = self.set_virtio_pci(vm_sut) self.logger.info("{} virtio list: {}".format(vm_name, vm.virtio_list)) - vm.cores = vm_dut.get_core_list("all") + vm.cores = vm_sut.get_core_list("all") self.logger.info("{} core list: {}".format(vm_name, vm.cores)) - vm.ports = [port["pci"] for port in vm_dut.ports_info] + vm.ports = [port["pci"] for port in vm_sut.ports_info] self.logger.info("{} port list: {}".format(vm_name, vm.ports)) - self.cfg_prepare(vm_dut) + self.cfg_prepare(vm_sut) - return vm, vm_dut + return vm, vm_sut def send_and_dump_pkg(self): status = True - inst = self.tester.tcpdump_sniff_packets(self.rx_interface) + inst = self.tg_node.tcpdump_sniff_packets(self.rx_interface) PACKET_COUNT = 65 payload = 256 * ["11"] - pkt = Packet() - pkt.assign_layers(["ether", "ipv4", "udp", "raw"]) - pkt.config_layer( + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "ipv4", "udp", "raw"]) + 
scapy_pkt_builder.config_layer( "ether", {"src": "52:00:00:00:00:00", "dst": "52:00:00:00:00:01"} ) src_ip = "192.168.105.200" dst_ip = "192.168.105.100" - pkt.config_layer("ipv4", {"src": src_ip, "dst": dst_ip}) - pkt.config_layer("udp", {"src": 1111, "dst": 2222}) - pkt.config_layer("raw", {"payload": payload}) - pkt.send_pkt(self.tester, tx_port=self.tx_interface, count=PACKET_COUNT) - pkt_rec = self.tester.load_tcpdump_sniff_packets(inst) - - self.logger.info("dump: {} packets".format(len(pkt_rec))) - if len(pkt_rec) != PACKET_COUNT: + scapy_pkt_builder.config_layer("ipv4", {"src": src_ip, "dst": dst_ip}) + scapy_pkt_builder.config_layer("udp", {"src": 1111, "dst": 2222}) + scapy_pkt_builder.config_layer("raw", {"payload": payload}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.tx_interface, count=PACKET_COUNT) + scapy_pkts = self.tg_node.load_tcpdump_sniff_packets(inst) + + self.logger.info("dump: {} packets".format(len(scapy_pkts))) + if len(scapy_pkts) != PACKET_COUNT: self.logger.info( - "dump pkg: {}, the num of pkg dumped is incorrtct!".format(len(pkt_rec)) + "dump pkt: {}, the num of pkt dumped is incorrtct!".format(len(scapy_pkts)) ) status = False - for i in range(len(pkt_rec)): - if src_ip != pkt_rec.pktgen.strip_layer3( + for i in range(len(scapy_pkts)): + if src_ip != scapy_pkts.scapy_pkt_util.strip_layer3( "src", p_index=i - ) or dst_ip != pkt_rec.pktgen.strip_layer3("dst", p_index=i): + ) or dst_ip != scapy_pkts.scapy_pkt_util.strip_layer3("dst", p_index=i): self.logger.info("the ip of pkg dumped is incorrtct!") status = False dump_text = str( - binascii.b2a_hex(pkt_rec[i]["Raw"].getfieldval("load")), + binascii.b2a_hex(scapy_pkts[i]["Raw"].getfieldval("load")), encoding="utf-8", ) if dump_text != "".join(payload): @@ -284,7 +284,7 @@ class VirtioCryptodevIpsecTest(TestCase): ) out0 = self._run_crypto_ipsec( - self.vm0_dut, eal_opt_str_0, crypto_ipsec_opt_str0 + self.vm0_sut, eal_opt_str_0, crypto_ipsec_opt_str0 ) self.logger.info(out0) 
@@ -307,7 +307,7 @@ class VirtioCryptodevIpsecTest(TestCase): }, ) out1 = self._run_crypto_ipsec( - self.vm1_dut, eal_opt_str_1, crypto_ipsec_opt_str1 + self.vm1_sut, eal_opt_str_1, crypto_ipsec_opt_str1 ) self.logger.info(out1) @@ -337,7 +337,7 @@ class VirtioCryptodevIpsecTest(TestCase): }, ) out0 = self._run_crypto_ipsec( - self.vm0_dut, eal_opt_str_0, crypto_ipsec_opt_str0 + self.vm0_sut, eal_opt_str_0, crypto_ipsec_opt_str0 ) self.logger.info(out0) @@ -360,20 +360,20 @@ class VirtioCryptodevIpsecTest(TestCase): }, ) out1 = self._run_crypto_ipsec( - self.vm1_dut, eal_opt_str_1, crypto_ipsec_opt_str1 + self.vm1_sut, eal_opt_str_1, crypto_ipsec_opt_str1 ) self.logger.info(out1) result = self.send_and_dump_pkg() self.verify(result, "FAILED") - def _run_crypto_ipsec(self, vm_dut, eal_opt_str, case_opt_str): + def _run_crypto_ipsec(self, vm_sut, eal_opt_str, case_opt_str): cmd_str = cc.get_dpdk_app_cmd_str( self.user_app, eal_opt_str, case_opt_str + " -l" ) self.logger.info(cmd_str) try: - out = vm_dut.send_expect(cmd_str, "IPSEC", 600) + out = vm_sut.send_expect(cmd_str, "IPSEC", 600) except Exception as ex: self.logger.error(ex) raise ex @@ -381,27 +381,27 @@ class VirtioCryptodevIpsecTest(TestCase): return out def tear_down(self): - self.vm0_dut.send_expect("^C", "# ") - self.vm1_dut.send_expect("^C", "# ") + self.vm0_sut.send_expect("^C", "# ") + self.vm1_sut.send_expect("^C", "# ") def tear_down_all(self): if getattr(self, "vm0", None): - self.vm0_dut.kill_all() + self.vm0_sut.kill_all() self.vm0.stop() self.vm0 = None if getattr(self, "vm1", None): - self.vm1_dut.kill_all() + self.vm1_sut.kill_all() self.vm1.stop() self.vm1 = None if self.vm1: self.vm1.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() self.vm1 = None - self.dut_execut_cmd("^C", "# ") + self.sut_execut_cmd("^C", "# ") self.app_name = self.sample_app[self.sample_app.rfind("/") + 1 :] - self.dut.send_expect("killall -s INT %s" % self.app_name, "#") - self.dut_execut_cmd("killall -s INT 
qemu-system-x86_64") - self.dut_execut_cmd("rm -r /tmp/*") + self.sut_node.send_expect("killall -s INT %s" % self.app_name, "#") + self.sut_execut_cmd("killall -s INT qemu-system-x86_64") + self.sut_execut_cmd("rm -r /tmp/*") diff --git a/tests/TestSuite_virtio_perf_cryptodev_func.py b/tests/TestSuite_virtio_perf_cryptodev_func.py index 9b25da05..4254000b 100644 --- a/tests/TestSuite_virtio_perf_cryptodev_func.py +++ b/tests/TestSuite_virtio_perf_cryptodev_func.py @@ -17,8 +17,8 @@ from framework.test_case import TestCase class VirtioCryptodevPerfTest(TestCase): def set_up_all(self): - self.sample_app = self.dut.apps_name["vhost_crypto"] - self.user_app = self.dut.apps_name["test-crypto-perf"] + self.sample_app = self.sut_node.apps_name["vhost_crypto"] + self.user_app = self.sut_node.apps_name["test-crypto-perf"] self._default_crypto_perf_opts = { "ptest": "throughput", "silent": "", @@ -50,39 +50,39 @@ class VirtioCryptodevPerfTest(TestCase): "csv-friendly": None, } - self.vm0, self.vm0_dut = None, None - self.dut.skip_setup = True + self.vm0, self.vm0_sut = None, None + self.sut_node.skip_setup = True - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for test") - self.cores = self.dut.get_core_list("1S/3C/1T") - self.mem_channel = self.dut.get_memory_channels() + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for test") + self.cores = self.sut_node.get_core_list("1S/3C/1T") + self.mem_channel = self.sut_node.get_memory_channels() - self.dut.build_dpdk_apps("./examples/vhost_crypto") + self.sut_node.build_dpdk_apps("./examples/vhost_crypto") cc.bind_qat_device(self, self.drivername) self.vf_assign_method = "vfio-pci" - self.dut.setup_modules(None, self.vf_assign_method, None) + self.sut_node.setup_modules(None, self.vf_assign_method, None) - self.dut.restore_interfaces() - self.used_dut_port = self.dut_ports[0] - 
self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver="default") - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.restore_interfaces() + self.used_sut_port = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1, driver="default") + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver(self.vf_assign_method) - intf = self.dut.ports_info[self.used_dut_port]["intf"] + intf = self.sut_node.ports_info[self.used_sut_port]["intf"] vf_mac = "52:00:00:00:00:01" - self.dut.send_expect("ip link set %s vf 0 mac %s" % (intf, vf_mac), "# ") + self.sut_node.send_expect("ip link set %s vf 0 mac %s" % (intf, vf_mac), "# ") self.launch_vhost_switch() - self.vm0, self.vm0_dut = self.launch_virtio_dut("vm0") + self.vm0, self.vm0_sut = self.launch_virtio_sut("vm0") def set_up(self): pass - def dut_execut_cmd(self, cmdline, ex="#", timout=30): - return self.dut.send_expect(cmdline, ex, timout) + def sut_execut_cmd(self, cmdline, ex="#", timout=30): + return self.sut_node.send_expect(cmdline, ex, timout) def get_vhost_eal(self): default_eal_opts = { @@ -123,53 +123,53 @@ class VirtioCryptodevPerfTest(TestCase): "--config %s --socket-file %s" % (config, socket_file), ) - out = self.dut_execut_cmd(self.vhost_switch_cmd, "socket created", 30) + out = self.sut_execut_cmd(self.vhost_switch_cmd, "socket created", 30) self.logger.info(out) - def set_virtio_pci(self, dut): - out = dut.send_expect("lspci -d:1054|awk '{{print $1}}'", "# ", 10) + def set_virtio_pci(self, sut): + out = sut.send_expect("lspci -d:1054|awk '{{print $1}}'", "# ", 10) virtio_list = out.replace("\r", "\n").replace("\n\n", "\n").split("\n") - dut.send_expect("modprobe uio_pci_generic", "#", 10) + sut.send_expect("modprobe uio_pci_generic", "#", 10) for line in virtio_list: cmd = "echo 0000:{} > /sys/bus/pci/devices/0000\:{}/driver/unbind".format( line, 
line.replace(":", "\:") ) - dut.send_expect(cmd, "# ", 10) - dut.send_expect( + sut.send_expect(cmd, "# ", 10) + sut.send_expect( 'echo "1af4 1054" > /sys/bus/pci/drivers/uio_pci_generic/new_id', "# ", 10 ) return virtio_list - def launch_virtio_dut(self, vm_name): + def launch_virtio_sut(self, vm_name): # start vm - vm = QEMUKvm(self.dut, vm_name, "virtio_perf_cryptodev_func") + vm = QEMUKvm(self.sut_node, vm_name, "virtio_perf_cryptodev_func") vf0 = {"opt_host": self.sriov_vfs_port[0].pci} vm.set_vm_device(driver=self.vf_assign_method, **vf0) - skip_setup = self.dut.skip_setup + skip_setup = self.sut_node.skip_setup try: - self.dut.skip_setup = True - vm_dut = vm.start() - if vm_dut is None: + self.sut_node.skip_setup = True + vm_sut = vm.start() + if vm_sut is None: print(("{} start failed".format(vm_name))) except Exception as err: raise err - self.dut.skip_setup = skip_setup - vm_dut.restore_interfaces() + self.sut_node.skip_setup = skip_setup + vm_sut.restore_interfaces() - vm_dut.setup_modules(self.target, self.drivername, None) - vm_dut.bind_interfaces_linux(self.drivername) - vm.virtio_list = self.set_virtio_pci(vm_dut) + vm_sut.setup_modules(self.target, self.drivername, None) + vm_sut.bind_interfaces_linux(self.drivername) + vm.virtio_list = self.set_virtio_pci(vm_sut) self.logger.info("{} virtio list: {}".format(vm_name, vm.virtio_list)) - vm.cores = vm_dut.get_core_list("all") + vm.cores = vm_sut.get_core_list("all") self.logger.info("{} core list: {}".format(vm_name, vm.cores)) - vm.ports = [port["pci"] for port in vm_dut.ports_info] + vm.ports = [port["pci"] for port in vm_sut.ports_info] self.logger.info("{} port list: {}".format(vm_name, vm.ports)) - return vm, vm_dut + return vm, vm_sut def test_aesni_mb_aes_cbc_sha1_hmac(self): if cc.is_test_skip(self): @@ -201,7 +201,7 @@ class VirtioCryptodevPerfTest(TestCase): ) self.logger.info(cmd_str) try: - out = self.vm0_dut.send_expect(cmd_str, "#", 600) + out = self.vm0_sut.send_expect(cmd_str, "#", 
600) except Exception as ex: self.logger.error(ex) raise ex @@ -213,16 +213,16 @@ class VirtioCryptodevPerfTest(TestCase): def tear_down_all(self): if getattr(self, "vm0", None): - self.vm0_dut.kill_all() + self.vm0_sut.kill_all() self.vm0.stop() self.vm0 = None - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - self.used_dut_port = None + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + self.used_sut_port = None - self.dut_execut_cmd("^C", "# ") + self.sut_execut_cmd("^C", "# ") self.app_name = self.sample_app[self.sample_app.rfind("/") + 1 :] - self.dut.send_expect("killall -s INT %s" % self.app_name, "#") - self.dut_execut_cmd("killall -s INT qemu-system-x86_64") - self.dut_execut_cmd("rm -r /tmp/*") + self.sut_node.send_expect("killall -s INT %s" % self.app_name, "#") + self.sut_execut_cmd("killall -s INT qemu-system-x86_64") + self.sut_execut_cmd("rm -r /tmp/*") diff --git a/tests/TestSuite_virtio_pvp_regression.py b/tests/TestSuite_virtio_pvp_regression.py index 83d0e6bc..13bc2171 100644 --- a/tests/TestSuite_virtio_pvp_regression.py +++ b/tests/TestSuite_virtio_pvp_regression.py @@ -18,32 +18,32 @@ import re import time import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM class TestVirtioPVPRegression(TestCase): def set_up_all(self): # Get and verify the ports - self.dut_ports = self.dut.get_ports() - self.pf = self.dut_ports[0] + self.sut_ports = self.sut_node.get_ports() + self.pf = self.sut_ports[0] self.number_of_ports = 1 # Get the port's socket - netdev = self.dut.ports_info[self.pf]["port"] - self.pci_info = self.dut.ports_info[self.pf]["pci"] + netdev = self.sut_node.ports_info[self.pf]["port"] + self.pci_info = 
self.sut_node.ports_info[self.pf]["pci"] self.socket = netdev.get_nic_socket() - self.cores = self.dut.get_core_list("1S/3C/1T", socket=self.socket) + self.cores = self.sut_node.get_core_list("1S/3C/1T", socket=self.socket) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.verify( len(self.cores) >= 3, "There has not enought cores to test this suite" ) self.port_number = 2 self.queues_number = 2 - self.dst_mac = self.dut.get_mac_address(self.dut_ports[0]) - self.vm_dut = None + self.dst_mac = self.sut_node.get_mac_address(self.sut_ports[0]) + self.vm_sut = None self.packet_params_set() self.logger.info( @@ -58,28 +58,28 @@ class TestVirtioPVPRegression(TestCase): res = self.verify_qemu_version_config() self.verify(res is True, "The path of qemu version in config file not right") - if len(set([int(core["socket"]) for core in self.dut.cores])) == 1: + if len(set([int(core["socket"]) for core in self.sut_node.cores])) == 1: self.socket_mem = "1024" else: self.socket_mem = "1024,1024" # the path of pcap file self.out_path = "/tmp/%s" % self.suite_name - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.pktgen_helper = TrafficGeneratorStream() + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] def set_up(self): """ Run before each test case. 
""" - self.vhost = self.dut.new_session(suite="vhost-user") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.vhost = self.sut_node.new_session(suite="vhost-user") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") def packet_params_set(self): self.frame_sizes = [64, 1518] @@ -121,7 +121,7 @@ class TestVirtioPVPRegression(TestCase): """ verify the config has config right qemu version """ - self.vm = VM(self.dut, "vm0", self.suite_name) + self.vm = VM(self.sut_node, "vm0", self.suite_name) self.vm.load_config() # get qemu version list from config file self.get_qemu_list_from_config() @@ -129,22 +129,22 @@ class TestVirtioPVPRegression(TestCase): for i in range(qemu_num): qemu_path = self.qemu_list[i]["path"] - out = self.dut.send_expect("ls %s" % qemu_path, "#") + out = self.sut_node.send_expect("ls %s" % qemu_path, "#") if "No such file or directory" in out: self.logger.error( - "No emulator [ %s ] on the DUT [ %s ]" - % (qemu_path, self.dut.get_ip_address()) + "No emulator [ %s ] on the SUT [ %s ]" + % (qemu_path, self.sut_node.get_ip_address()) ) return False - out = self.dut.send_expect("[ -x %s ];echo $?" % qemu_path, "# ") + out = self.sut_node.send_expect("[ -x %s ];echo $?" 
% qemu_path, "# ") if out != "0": self.logger.error( - "Emulator [ %s ] not executable on the DUT [ %s ]" - % (qemu_path, self.dut.get_ip_address()) + "Emulator [ %s ] not executable on the SUT [ %s ]" + % (qemu_path, self.sut_node.get_ip_address()) ) return False - out = self.dut.send_expect("%s --version" % qemu_path, "#") + out = self.sut_node.send_expect("%s --version" % qemu_path, "#") result = re.search("QEMU\s*emulator\s*version\s*(\d*.\d*)", out) version = result.group(1) @@ -200,7 +200,7 @@ class TestVirtioPVPRegression(TestCase): """ start vm """ - self.vm = VM(self.dut, "vm0", self.suite_name) + self.vm = VM(self.sut_node, "vm0", self.suite_name) vm_params = {} vm_params["driver"] = "vhost-user" vm_params["opt_path"] = "%s/vhost-net" % self.base_dir @@ -237,8 +237,8 @@ class TestVirtioPVPRegression(TestCase): # Due to we have change the params info before, # so need to start vm with load_config=False try: - self.vm_dut = self.vm.start(load_config=False) - if self.vm_dut is None: + self.vm_sut = self.vm.start(load_config=False) + if self.vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: self.logger.error("ERROR: Failure for %s" % str(e)) @@ -252,7 +252,7 @@ class TestVirtioPVPRegression(TestCase): r"'eth_vhost0,iface=%s/vhost-net,queues=%d,client=1'" % (self.base_dir, self.queues_number) ] - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.cores, prefix="vhost", ports=[self.pci_info], vdevs=vdev ) para = " -- -i --nb-cores=%d --rxq=%d --txq=%d --txd=1024 --rxd=1024" % ( @@ -278,11 +278,11 @@ class TestVirtioPVPRegression(TestCase): Start testpmd in vm """ self.verify( - len(self.vm_dut.cores) >= 3, + len(self.vm_sut.cores) >= 3, "The vm does not have enough core to start testpmd, " "please config it in %s.cfg" % self.suite_name, ) - if self.vm_dut is not None: + if self.vm_sut is not None: opt_args = "" if virtio_path in ["mergeable", "normal"]: opt_args = 
"--enable-hw-vlan-strip" @@ -297,9 +297,9 @@ class TestVirtioPVPRegression(TestCase): self.queues_number, self.queues_number, ) - self.vm_dut.send_expect(vm_testpmd, "testpmd> ", 20) - self.vm_dut.send_expect("set fwd mac", "testpmd> ", 20) - self.vm_dut.send_expect("start", "testpmd> ") + self.vm_sut.send_expect(vm_testpmd, "testpmd> ", 20) + self.vm_sut.send_expect("set fwd mac", "testpmd> ", 20) + self.vm_sut.send_expect("start", "testpmd> ") def check_packets_of_each_queue(self, frame_size): """ @@ -357,29 +357,29 @@ class TestVirtioPVPRegression(TestCase): self.src1, payload, ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s/pvp_diff_qemu_version.pcap", %s)' % (self.out_path, flow) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() tgenInput = [] - port = self.tester.get_local_port(self.pf) + port = self.tg_node.get_local_port(self.pf) tgenInput.append( (port, port, "%s/pvp_diff_qemu_version.pcap" % self.out_path) ) - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() fields_config = { "ip": { "dst": {"range": 127, "step": 1, "action": "random"}, }, } streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, fields_config, self.tester.pktgen + tgenInput, 100, fields_config, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 5, "duration": 20} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) Mpps = pps / 1000000.0 @@ -408,12 +408,12 @@ class TestVirtioPVPRegression(TestCase): stop testpmd in vhost and qemu close the qemu """ - self.vm_dut.send_expect("quit", "#", 20) + self.vm_sut.send_expect("quit", "#", 20) self.vhost.send_expect("quit", "#", 20) self.vm.stop() - self.dut.send_expect("killall -I %s" % self.testpmd_name, "#", 20) - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + 
self.sut_node.send_expect("killall -I %s" % self.testpmd_name, "#", 20) + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") def pvp_regression_run(self, case_info, modem, virtio_path, packed=False): """ @@ -441,12 +441,12 @@ class TestVirtioPVPRegression(TestCase): self.send_verify(case_info, version, "before reconnect") self.logger.info("now reconnect from vhost") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "# ") self.start_testpmd_as_vhost() self.send_verify(case_info, version, "reconnect from vhost") self.logger.info("now reconnect from vm") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vm(path, version, modem, virtio_path, packed=packed) self.start_testpmd_in_vm(virtio_path) self.send_verify(case_info, version, "reconnect from vm") @@ -539,9 +539,9 @@ class TestVirtioPVPRegression(TestCase): Run after each test case. Clear qemu and testpmd to avoid blocking the following TCs """ - self.dut.close_session(self.vhost) - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.close_session(self.vhost) + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") time.sleep(2) def tear_down_all(self): diff --git a/tests/TestSuite_virtio_smoke.py b/tests/TestSuite_virtio_smoke.py index db041cf8..1f89bac8 100644 --- a/tests/TestSuite_virtio_smoke.py +++ b/tests/TestSuite_virtio_smoke.py @@ -21,32 +21,32 @@ class TestVirtioSmoke(TestCase): Run at the start of each test suite. 
""" self.dst_mac = "00:01:02:03:04:05" - self.dut_ports = self.dut.get_ports() - self.txItf = self.tester.get_interface( - self.tester.get_local_port(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports() + self.txItf = self.tg_node.get_interface( + self.tg_node.get_local_port(self.sut_ports[0]) ) - self.socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores = self.dut.get_core_list("all", socket=self.socket) + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores = self.sut_node.get_core_list("all", socket=self.socket) self.vhost_cores = self.cores[0:3] self.virtio1_cores = self.cores[3:6] - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.path = self.dut.apps_name["test-pmd"] + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.path.split("/")[-1] - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user1 = self.dut.new_session(suite="virtio-user1") - self.pmdout_vhost_user = PmdOutput(self.dut, self.vhost_user) - self.pmdout_virtio_user1 = PmdOutput(self.dut, self.virtio_user1) + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user1 = self.sut_node.new_session(suite="virtio-user1") + self.pmdout_vhost_user = PmdOutput(self.sut_node, self.vhost_user) + self.pmdout_virtio_user1 = PmdOutput(self.sut_node, self.virtio_user1) def set_up(self): """ Run before each test case. 
""" - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) # On CentOS, sometimes return ' 2048' @@ -145,10 +145,10 @@ class TestVirtioSmoke(TestCase): def send_packets(self, frame_size, pkt_count): pkt = "Ether(dst='%s')/IP()/('x'*%d)" % (self.dst_mac, frame_size) - self.tester.scapy_append( + self.tg_node.scapy_append( 'sendp([%s], iface="%s", count=%s)' % (pkt, self.txItf, pkt_count) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() def verify_virtio_user_receive_packets(self, pkt_count): out = self.pmdout_virtio_user1.execute_cmd("show port stats all") @@ -174,7 +174,7 @@ class TestVirtioSmoke(TestCase): nb_core = 2 vhost_rxd_txd = 1024 vhost_param = param.format(nb_core, vhost_rxd_txd, vhost_rxd_txd) - port = self.dut.ports_info[self.dut_ports[0]]["pci"] + port = self.sut_node.ports_info[self.sut_ports[0]]["pci"] self.launch_testpmd_as_vhost_user( param=vhost_param, cores=self.vhost_cores, @@ -194,8 +194,8 @@ class TestVirtioSmoke(TestCase): self.pmdout_virtio_user1.execute_cmd("start") self.logger.info("Start send packets and verify") - # set tester port MTU=9000 when need to send big packets. - self.tester.send_expect("ifconfig %s mtu %s" % (self.txItf, TSO_MTU), "# ") + # set TG port MTU=9000 when need to send big packets. + self.tg_node.send_expect("ifconfig %s mtu %s" % (self.txItf, TSO_MTU), "# ") # set vhost testpmd port MTU=9000 self.pmdout_vhost_user.execute_cmd("stop") self.pmdout_vhost_user.execute_cmd("port stop 0") @@ -213,12 +213,12 @@ class TestVirtioSmoke(TestCase): """ Run after each test case. 
""" - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") def tear_down_all(self): """ Run after each test suite. """ - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user1) - self.tester.send_expect("ifconfig %s mtu %s" % (self.txItf, DEFAULT_MTU), "# ") + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user1) + self.tg_node.send_expect("ifconfig %s mtu %s" % (self.txItf, DEFAULT_MTU), "# ") diff --git a/tests/TestSuite_virtio_unit_cryptodev_func.py b/tests/TestSuite_virtio_unit_cryptodev_func.py index cd9df373..aa234ba6 100644 --- a/tests/TestSuite_virtio_unit_cryptodev_func.py +++ b/tests/TestSuite_virtio_unit_cryptodev_func.py @@ -18,42 +18,42 @@ from framework.test_case import TestCase class VirtioCryptodevUnitTest(TestCase): def set_up_all(self): - self.sample_app = self.dut.apps_name["vhost_crypto"] - self.user_app = self.dut.apps_name["test"] + self.sample_app = self.sut_node.apps_name["vhost_crypto"] + self.user_app = self.sut_node.apps_name["test"] - self.vm0, self.vm0_dut = None, None - self.dut.skip_setup = True + self.vm0, self.vm0_sut = None, None + self.sut_node.skip_setup = True - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for test") - self.cores = self.dut.get_core_list("1S/3C/1T") - self.mem_channel = self.dut.get_memory_channels() + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for test") + self.cores = self.sut_node.get_core_list("1S/3C/1T") + self.mem_channel = self.sut_node.get_memory_channels() cc.bind_qat_device(self, self.drivername) - self.dut.build_dpdk_apps("./examples/vhost_crypto") + self.sut_node.build_dpdk_apps("./examples/vhost_crypto") self.vf_assign_method = "vfio-pci" - self.dut.setup_modules(None, self.vf_assign_method, None) + 
self.sut_node.setup_modules(None, self.vf_assign_method, None) - self.dut.restore_interfaces() - self.used_dut_port = self.dut_ports[0] - self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver="default") - self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]["vfs_port"] + self.sut_node.restore_interfaces() + self.used_sut_port = self.sut_ports[0] + self.sut_node.generate_sriov_vfs_by_port(self.used_sut_port, 1, driver="default") + self.sriov_vfs_port = self.sut_node.ports_info[self.used_sut_port]["vfs_port"] for port in self.sriov_vfs_port: port.bind_driver(self.vf_assign_method) - intf = self.dut.ports_info[self.used_dut_port]["intf"] + intf = self.sut_node.ports_info[self.used_sut_port]["intf"] vf_mac = "52:00:00:00:00:01" - self.dut.send_expect("ip link set %s vf 0 mac %s" % (intf, vf_mac), "# ") + self.sut_node.send_expect("ip link set %s vf 0 mac %s" % (intf, vf_mac), "# ") self.launch_vhost_switch() - self.vm0, self.vm0_dut = self.launch_virtio_dut("vm0") + self.vm0, self.vm0_sut = self.launch_virtio_sut("vm0") def set_up(self): pass - def dut_execut_cmd(self, cmdline, ex="#", timout=30): - return self.dut.send_expect(cmdline, ex, timout) + def sut_execut_cmd(self, cmdline, ex="#", timout=30): + return self.sut_node.send_expect(cmdline, ex, timout) def get_vhost_eal(self): default_eal_opts = { @@ -95,45 +95,45 @@ class VirtioCryptodevUnitTest(TestCase): "--config %s --socket-file %s" % (config, socket_file), ) - out = self.dut_execut_cmd(self.vhost_switch_cmd, "socket created", 30) + out = self.sut_execut_cmd(self.vhost_switch_cmd, "socket created", 30) self.logger.info(out) - def set_virtio_pci(self, dut): - out = dut.send_expect("lspci -d:1054|awk '{{print $1}}'", "# ", 10) + def set_virtio_pci(self, sut): + out = sut.send_expect("lspci -d:1054|awk '{{print $1}}'", "# ", 10) virtio_list = out.replace("\r", "\n").replace("\n\n", "\n").split("\n") - dut.send_expect("modprobe uio_pci_generic", "#", 10) + sut.send_expect("modprobe 
uio_pci_generic", "#", 10) for line in virtio_list: cmd = "echo 0000:{} > /sys/bus/pci/devices/0000\:{}/driver/unbind".format( line, line.replace(":", "\:") ) - dut.send_expect(cmd, "# ", 10) - dut.send_expect( + sut.send_expect(cmd, "# ", 10) + sut.send_expect( 'echo "1af4 1054" > /sys/bus/pci/drivers/uio_pci_generic/new_id', "# ", 10 ) return virtio_list - def launch_virtio_dut(self, vm_name): + def launch_virtio_sut(self, vm_name): # start vm - vm = QEMUKvm(self.dut, vm_name, "virtio_unit_cryptodev_func") + vm = QEMUKvm(self.sut_node, vm_name, "virtio_unit_cryptodev_func") vf0 = {"opt_host": self.sriov_vfs_port[0].pci} vm.set_vm_device(driver=self.vf_assign_method, **vf0) try: - vm_dut = vm.start() - if vm_dut is None: + vm_sut = vm.start() + if vm_sut is None: print(("{} start failed".format(vm_name))) except Exception as err: raise err - vm.virtio_list = self.set_virtio_pci(vm_dut) + vm.virtio_list = self.set_virtio_pci(vm_sut) self.logger.info("{} virtio list: {}".format(vm_name, vm.virtio_list)) - vm.cores = vm_dut.get_core_list("all") + vm.cores = vm_sut.get_core_list("all") self.logger.info("{} core list: {}".format(vm_name, vm.cores)) - vm.ports = [port["pci"] for port in vm_dut.ports_info] + vm.ports = [port["pci"] for port in vm_sut.ports_info] self.logger.info("{} port list: {}".format(vm_name, vm.ports)) - return vm, vm_dut + return vm, vm_sut def test_cryptodev_virtio_autotest(self): eal_opt_str = cc.get_eal_opt_str(self, {"a": None, "vdev": "crypto_virtio"}) @@ -141,40 +141,40 @@ class VirtioCryptodevUnitTest(TestCase): def __run_unit_test(self, testsuite, eal_opt_str="", timeout=600): self.logger.info("STEP_TEST: " + testsuite) - self.vm0_dut.send_expect("dmesg -C", "# ", 30) + self.vm0_sut.send_expect("dmesg -C", "# ", 30) cmd_str = cc.get_dpdk_app_cmd_str(self.user_app, "--log-level 6", eal_opt_str) - info = self.vm0_dut.send_expect(cmd_str, "RTE>>", 30) + info = self.vm0_sut.send_expect(cmd_str, "RTE>>", 30) self.logger.info(info) out = "" try: 
- out = self.vm0_dut.send_expect(testsuite, "RTE>>", timeout) - self.vm0_dut.send_expect("quit", "# ", 30) + out = self.vm0_sut.send_expect(testsuite, "RTE>>", timeout) + self.vm0_sut.send_expect("quit", "# ", 30) except Exception as err: self.logger.error("Cryptodev Unit Tests Exception") - dmesg = self.vm0_dut.alt_session.send_expect("dmesg", "# ", 30) + dmesg = self.vm0_sut.alt_session.send_expect("dmesg", "# ", 30) self.logger.error("dmesg info:") self.logger.error(dmesg) self.logger.info(out) self.verify("Test OK" in out, "Test Failed") - self.vm0_dut.kill_all() + self.vm0_sut.kill_all() def tear_down(self): pass def tear_down_all(self): if getattr(self, "vm0", None): - self.vm0_dut.kill_all() + self.vm0_sut.kill_all() self.vm0.stop() self.vm0 = None - if getattr(self, "used_dut_port", None) != None: - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - self.used_dut_port = None + if getattr(self, "used_sut_port", None) != None: + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + self.used_sut_port = None - self.dut_execut_cmd("^C", "# ") + self.sut_execut_cmd("^C", "# ") self.app_name = self.sample_app[self.sample_app.rfind("/") + 1 :] - self.dut.send_expect("killall -s INT %s" % self.app_name, "#") - self.dut_execut_cmd("killall -s INT qemu-system-x86_64") - self.dut_execut_cmd("rm -r /tmp/*") + self.sut_node.send_expect("killall -s INT %s" % self.app_name, "#") + self.sut_execut_cmd("killall -s INT qemu-system-x86_64") + self.sut_execut_cmd("rm -r /tmp/*") diff --git a/tests/TestSuite_virtio_user_as_exceptional_path.py b/tests/TestSuite_virtio_user_as_exceptional_path.py index 1f7d8330..b690f891 100644 --- a/tests/TestSuite_virtio_user_as_exceptional_path.py +++ b/tests/TestSuite_virtio_user_as_exceptional_path.py @@ -12,40 +12,40 @@ import time import framework.utils as utils import tests.vhost_peer_conf as peer -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE from framework.test_case import 
TestCase +from framework.tg_perf import TrafficGeneratorStream class TestVirtioUserAsExceptionalPath(TestCase): def set_up_all(self): # Get and verify the ports - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.def_driver = self.dut.ports_info[self.dut_ports[0]][ + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.def_driver = self.sut_node.ports_info[self.sut_ports[0]][ "port" ].get_nic_driver() - self.pci0 = self.dut.ports_info[0]["pci"] - pf_info = self.dut_ports[0] - netdev = self.dut.ports_info[pf_info]["port"] + self.pci0 = self.sut_node.ports_info[0]["pci"] + pf_info = self.sut_ports[0] + netdev = self.sut_node.ports_info[pf_info]["port"] self.socket = netdev.get_nic_socket() self.virtio_ip1 = "2.2.2.1" self.virtio_ip2 = "2.2.2.21" self.virtio_mac = "52:54:00:00:00:01" self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # set diff arg about mem_socket base on socket number - if len(set([int(core["socket"]) for core in self.dut.cores])) == 1: + if len(set([int(core["socket"]) for core in self.sut_node.cores])) == 1: self.socket_mem = "1024" else: self.socket_mem = "1024,1024" - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() self.peer_pci_setup = False self.prepare_dpdk() - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] def set_up(self): @@ -53,15 +53,15 @@ class TestVirtioUserAsExceptionalPath(TestCase): # Run before each test case. 
# # Clean the execution ENV - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("modprobe vhost-net", "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("modprobe vhost-net", "#") self.peer_pci_setup = False def get_pci_info_from_cfg(self): # Get the port's socket and get the core for testpmd - self.cores = self.dut.get_core_list("1S/2C/1T", socket=self.socket) + self.cores = self.sut_node.get_core_list("1S/2C/1T", socket=self.socket) self.pci = peer.get_pci_info() self.pci_drv = peer.get_pci_driver_info() @@ -75,11 +75,11 @@ class TestVirtioUserAsExceptionalPath(TestCase): "Pls config the direct connection info in vhost_peer_conf.cfg", ) # unbind the port conf in ports.cfg - for i in self.dut_ports: - port = self.dut.ports_info[i]["port"] + for i in self.sut_ports: + port = self.sut_node.ports_info[i]["port"] port.bind_driver() - bind_script_path = self.dut.get_dpdk_bind_script() - self.dut.send_expect( + bind_script_path = self.sut_node.get_dpdk_bind_script() + self.sut_node.send_expect( "%s --bind=%s %s" % (bind_script_path, self.def_driver, self.pci), "# " ) self.peer_pci_setup = True @@ -92,29 +92,29 @@ class TestVirtioUserAsExceptionalPath(TestCase): comment = " --txq=2 --rxq=2 --nb-cores=1" cores_number = 4 cores_config = "1S/%sC/1T" % cores_number - cores_list = self.dut.get_core_list(cores_config, socket=self.socket) + cores_list = self.sut_node.get_core_list(cores_config, socket=self.socket) self.verify(len(cores_list) >= cores_number, "Failed to get cores list") core_mask = cores_list[0:2] testcmd = self.app_testpmd_path + " " vdev = "--vdev=virtio_user0,mac=%s,path=/dev/vhost-net," % self.virtio_mac - eal_params = 
self.dut.create_eal_parameters(cores=core_mask, ports=[self.pci0]) + eal_params = self.sut_node.create_eal_parameters(cores=core_mask, ports=[self.pci0]) para = " queue_size=1024,queues=%s -- -i --rxd=1024 --txd=1024 %s" % ( self.queue, comment, ) self.testcmd_start = testcmd + eal_params + vdev + para - self.vhost_user = self.dut.new_session(suite="user") + self.vhost_user = self.sut_node.new_session(suite="user") self.vhost_user.send_expect(self.testcmd_start, "testpmd> ", 120) self.vhost_user.send_expect("start", "testpmd>", 120) - vhost_pid = self.dut.send_expect( + vhost_pid = self.sut_node.send_expect( "ps -aux | grep vhost | grep -v grep | awk '{print $2}'", "# " ) vhost_pid_list = vhost_pid.split("\r\n") - self.dut.send_expect( + self.sut_node.send_expect( "taskset -pc %s %s" % (cores_list[-1], vhost_pid_list[1]), "# " ) if self.queue == 2: - self.dut.send_expect( + self.sut_node.send_expect( "taskset -pc %s %s" % (cores_list[-2], vhost_pid_list[2]), "# " ) @@ -124,10 +124,10 @@ class TestVirtioUserAsExceptionalPath(TestCase): "--vdev=virtio_user0,mac=%s,path=/dev/vhost-net,queue_size=1024" % self.virtio_mac ) - eal_params = self.dut.create_eal_parameters(cores=self.cores, ports=[self.pci]) + eal_params = self.sut_node.create_eal_parameters(cores=self.cores, ports=[self.pci]) para = " -- -i --rxd=1024 --txd=1024" self.testcmd_start = testcmd + eal_params + vdev + para - self.vhost_user = self.dut.new_session(suite="user") + self.vhost_user = self.sut_node.new_session(suite="user") self.vhost_user.send_expect("modprobe vhost-net", "#", 120) self.vhost_user.send_expect(self.testcmd_start, "testpmd> ", 120) self.vhost_user.send_expect("set fwd csum", "testpmd> ", 120) @@ -150,14 +150,14 @@ class TestVirtioUserAsExceptionalPath(TestCase): self.vhost_user.send_expect("start", "testpmd> ", 120) def set_route_table(self): - self.dut.send_expect("ifconfig tap0 up", "#") - self.dut.send_expect("ifconfig tap0 2.2.2.2/24 up", "#") - self.dut.send_expect("route add 
-net 2.2.2.0/24 gw 2.2.2.1 dev tap0", "#") - self.dut.send_expect("arp -s 2.2.2.1 %s" % self.virtio_mac, "#") + self.sut_node.send_expect("ifconfig tap0 up", "#") + self.sut_node.send_expect("ifconfig tap0 2.2.2.2/24 up", "#") + self.sut_node.send_expect("route add -net 2.2.2.0/24 gw 2.2.2.1 dev tap0", "#") + self.sut_node.send_expect("arp -s 2.2.2.1 %s" % self.virtio_mac, "#") def prepare_tap_device(self): - self.dut.send_expect("ifconfig tap0 up", "#") - self.dut.send_expect("ifconfig tap0 1.1.1.2", "#") + self.sut_node.send_expect("ifconfig tap0 up", "#") + self.sut_node.send_expect("ifconfig tap0 1.1.1.2", "#") def testpmd_reset(self): self.vhost_user.send_expect("stop", "testpmd> ", 120) @@ -168,42 +168,42 @@ class TestVirtioUserAsExceptionalPath(TestCase): def config_kernel_nic_host(self): # - self.dut.send_expect("ip netns del ns1", "#") - self.dut.send_expect("ip netns add ns1", "#") - self.dut.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") - self.dut.send_expect( + self.sut_node.send_expect("ip netns del ns1", "#") + self.sut_node.send_expect("ip netns add ns1", "#") + self.sut_node.send_expect("ip link set %s netns ns1" % self.nic_in_kernel, "#") + self.sut_node.send_expect( "ip netns exec ns1 ifconfig %s 1.1.1.8 up" % self.nic_in_kernel, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 ethtool -K %s gro on" % self.nic_in_kernel, "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec ns1 ethtool -K %s tso on" % self.nic_in_kernel, "#" ) def prepare_dpdk(self): # # Changhe the testpmd checksum fwd code for mac change - self.dut.send_expect( + self.sut_node.send_expect( "cp ./app/test-pmd/csumonly.c ./app/test-pmd/csumonly_backup.c", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/ether_addr_copy(&peer_eth/i\#if 0' ./app/test-pmd/csumonly.c", "#" ) - self.dut.send_expect( + self.sut_node.send_expect( "sed -i '/parse_ethernet(eth_hdr, &info/i\#endif' 
./app/test-pmd/csumonly.c", "#", ) - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.build_install_dpdk(self.sut_node.target) time.sleep(3) def unprepare_dpdk(self): # Recovery the DPDK code to original - self.dut.send_expect( + self.sut_node.send_expect( "cp ./app/test-pmd/csumonly_backup.c ./app/test-pmd/csumonly.c ", "#" ) - self.dut.send_expect("rm -rf ./app/test-pmd/csumonly_backup.c", "#") - self.dut.build_install_dpdk(self.dut.target) + self.sut_node.send_expect("rm -rf ./app/test-pmd/csumonly_backup.c", "#") + self.sut_node.build_install_dpdk(self.sut_node.target) def iperf_result_verify(self, vm_client, direction): """ @@ -241,7 +241,7 @@ class TestVirtioUserAsExceptionalPath(TestCase): self.result_table_create(header_row) frame_size = 64 tgen_input = [] - port = self.tester.get_local_port(self.dut_ports[0]) + port = self.tg_node.get_local_port(self.sut_ports[0]) payload = ( frame_size - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - HEADER_SIZE["tcp"] ) @@ -251,10 +251,10 @@ class TestVirtioUserAsExceptionalPath(TestCase): self.virtio_ip1, payload, ) - self.tester.scapy_append( + self.tg_node.scapy_append( 'wrpcap("%s/exceptional_path.pcap", %s)' % (self.out_path, flow1) ) - self.tester.scapy_execute() + self.tg_node.scapy_execute() tgen_input.append((port, port, "%s/exceptional_path.pcap" % self.out_path)) for rate_value in range(20, -1, -1): rate_value = rate_value * 0.5 @@ -263,12 +263,12 @@ class TestVirtioUserAsExceptionalPath(TestCase): "dst": {"range": 1, "step": 1, "action": "inc"}, }, } - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, rate_value, vm_config, self.tester.pktgen + tgen_input, rate_value, vm_config, self.tg_node.perf_tg ) options = {"duration": 5, "rate": rate_value, "delay": 5} - result = self.tester.pktgen.measure_loss( + result = self.tg_node.perf_tg.measure_loss( stream_ids=streams, options=options ) tx_pkts = result[1] @@ 
-287,53 +287,53 @@ class TestVirtioUserAsExceptionalPath(TestCase): self.get_pci_info_from_cfg() self.config_kernel_nic_host() self.launch_testpmd_exception_path() - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) time.sleep(5) # Get the virtio-net device name self.prepare_tap_device() self.testpmd_reset() - self.dut.send_expect("ip netns exec ns1 iperf -s -i 1", "", 10) - self.iperf = self.dut.new_session(suite="iperf") + self.sut_node.send_expect("ip netns exec ns1 iperf -s -i 1", "", 10) + self.iperf = self.sut_node.new_session(suite="iperf") self.iperf.send_expect("rm /root/iperf_client.log", "#", 10) self.iperf.send_expect( "iperf -c 1.1.1.8 -i 1 -t 10 > /root/iperf_client.log &", "", 180 ) time.sleep(30) - self.dut.send_expect("^C", "#", 10) + self.sut_node.send_expect("^C", "#", 10) self.iperf_result_verify(self.iperf, "direction_TAP_original") self.logger.info("TAP->virtio-user->Kernel_NIC %s " % (self.output_result)) self.iperf.send_expect("rm /root/iperf_client.log", "#", 10) self.vhost_user.send_expect("quit", "#", 120) - self.dut.close_session(self.vhost_user) - self.dut.send_expect("ip netns del ns1", "#") - self.dut.close_session(self.iperf) + self.sut_node.close_session(self.vhost_user) + self.sut_node.send_expect("ip netns del ns1", "#") + self.sut_node.close_session(self.iperf) def test_vhost_exception_path_NIC_original(self): self.get_pci_info_from_cfg() self.config_kernel_nic_host() self.launch_testpmd_exception_path() time.sleep(5) - self.dut.get_session_output(timeout=2) + self.sut_node.get_session_output(timeout=2) self.prepare_tap_device() self.testpmd_reset() - self.iperf = self.dut.new_session(suite="iperf") - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.iperf = self.sut_node.new_session(suite="iperf") + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) self.iperf.send_expect("iperf -s -i 1", "", 180) - self.dut.send_expect( + self.sut_node.send_expect( "ip netns exec 
ns1 iperf -c 1.1.1.2 -i 1 -t 10 > /root/iperf_client.log &", "", 10, ) time.sleep(30) self.iperf.send_expect("^C", "#", 10) - self.iperf_result_verify(self.dut, "direction_NIC_original") - self.dut.get_session_output(timeout=2) + self.iperf_result_verify(self.sut_node, "direction_NIC_original") + self.sut_node.get_session_output(timeout=2) self.logger.info("Kernel_NIC<-virtio-user<-TAP %s " % (self.output_result)) - self.dut.send_expect("rm /root/iperf_client.log", "#", 10) + self.sut_node.send_expect("rm /root/iperf_client.log", "#", 10) self.vhost_user.send_expect("quit", "#", 120) - self.dut.close_session(self.vhost_user) - self.dut.send_expect("ip netns del ns1", "#") - self.dut.close_session(self.iperf) + self.sut_node.close_session(self.vhost_user) + self.sut_node.send_expect("ip netns del ns1", "#") + self.sut_node.close_session(self.iperf) def test_perf_vhost_single_queue(self): self.queue = 1 @@ -351,17 +351,17 @@ class TestVirtioUserAsExceptionalPath(TestCase): # # Run after each test case. # - self.dut.kill_all() - self.dut.close_session(self.vhost_user) - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf ./vhost-net", "#") + self.sut_node.kill_all() + self.sut_node.close_session(self.vhost_user) + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf ./vhost-net", "#") time.sleep(2) if self.peer_pci_setup: - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % (self.peer_pci), "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.pci_drv, self.peer_pci), "# ", 30, @@ -372,15 +372,15 @@ class TestVirtioUserAsExceptionalPath(TestCase): Run after each test suite. 
""" # bind the port conf in ports.cfg - for i in self.dut_ports: - port = self.dut.ports_info[i]["port"] + for i in self.sut_ports: + port = self.sut_node.ports_info[i]["port"] port.bind_driver(self.def_driver) self.unprepare_dpdk() if self.peer_pci_setup: - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % (self.pci), "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -b %s %s" % (self.pci_drv, self.pci), "# ", 30, diff --git a/tests/TestSuite_virtio_user_for_container_networking.py b/tests/TestSuite_virtio_user_for_container_networking.py index 790b88fb..e0178723 100644 --- a/tests/TestSuite_virtio_user_for_container_networking.py +++ b/tests/TestSuite_virtio_user_for_container_networking.py @@ -14,9 +14,9 @@ from copy import deepcopy import framework.rst as rst import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE, UPDATE_EXPECTED, load_global_setting from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestVirtioUserForContainer(TestCase): @@ -26,23 +26,23 @@ class TestVirtioUserForContainer(TestCase): """ self.queue_number = 1 self.nb_cores = 1 - self.dut_ports = self.dut.get_ports() - self.mem_channels = self.dut.get_memory_channels() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") + self.sut_ports = self.sut_node.get_ports() + self.mem_channels = self.sut_node.get_memory_channels() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") self.headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"] + HEADER_SIZE["udp"] self.docker_image = "ubuntu:latest" - self.container_base_dir = self.dut.base_dir + self.container_base_dir = self.sut_node.base_dir self.container_base_dir = 
self.container_base_dir.replace("~", "/root") self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() self.number_of_ports = 1 - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] self.save_result_flag = True self.json_obj = {} @@ -51,10 +51,10 @@ class TestVirtioUserForContainer(TestCase): """ Run before each test case. """ - self.dut.send_expect("rm -rf ./vhost-net*", "# ") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "# ") - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user = self.dut.new_session(suite="virtio-user") + self.sut_node.send_expect("rm -rf ./vhost-net*", "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "# ") + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user = self.sut_node.new_session(suite="virtio-user") # Prepare the result table self.virtio_mac = "00:11:22:33:44:10" self.table_header = ["Frame"] @@ -75,7 +75,7 @@ class TestVirtioUserForContainer(TestCase): def get_core_mask(self): core_config = "1S/%dC/1T" % (self.nb_cores * 2 + 2) - core_list = self.dut.get_core_list(core_config, socket=self.ports_socket) + core_list = self.sut_node.get_core_list(core_config, socket=self.ports_socket) self.verify( len(core_list) >= (self.nb_cores * 2 + 2), "There has not enought cores to test this case %s" % self.running_case, @@ -89,31 +89,31 @@ class TestVirtioUserForContainer(TestCase): def send_and_verify(self): """ - Send packet 
with packet generator and verify + Send packet with traffic generator and verify """ for frame_size in self.test_parameters.keys(): self.throughput[frame_size] = dict() payload_size = frame_size - self.headers_size tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - self.tester.scapy_append( + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_node.scapy_append( 'wrpcap("%s/vhost.pcap", [Ether(dst="%s")/IP()/UDP()/("X"*%d)])' % (self.out_path, self.virtio_mac, payload_size) ) tgen_input.append((tx_port, rx_port, "%s/vhost.pcap" % self.out_path)) - self.tester.scapy_execute() - self.tester.pktgen.clear_streams() + self.tg_node.scapy_execute() + self.tg_node.perf_tg.clear_streams() vm_config = { "mac": { "dst": {"range": 1, "step": 1, "action": "inc"}, }, } streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, vm_config, self.tester.pktgen + tgen_input, 100, vm_config, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) Mpps = pps / 1000000.0 self.throughput[frame_size][self.nb_desc] = Mpps throughput = Mpps * 100 / float(self.wirespeed(self.nic, frame_size, 1)) @@ -126,7 +126,7 @@ class TestVirtioUserForContainer(TestCase): @property def check_2M_env(self): - hugepage_size = self.dut.send_expect( + hugepage_size = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if hugepage_size == "2048" else False @@ -135,8 +135,8 @@ class TestVirtioUserForContainer(TestCase): """ start testpmd as vhost """ - self.pci_info = self.dut.ports_info[0]["pci"] - eal_param = self.dut.create_eal_parameters( + self.pci_info = self.sut_node.ports_info[0]["pci"] + eal_param = self.sut_node.create_eal_parameters( cores=self.core_list_vhost_user, 
prefix="vhost", vdevs=["net_vhost0,iface=vhost-net,queues=%d,client=0" % self.queue_number], @@ -195,8 +195,8 @@ class TestVirtioUserForContainer(TestCase): """ self.virtio_user.send_expect("quit", "# ", 60) self.vhost_user.send_expect("quit", "# ", 60) - self.dut.close_session(self.vhost_user) - self.dut.close_session(self.virtio_user) + self.sut_node.close_session(self.vhost_user) + self.sut_node.close_session(self.virtio_user) def handle_expected(self): """ @@ -348,7 +348,7 @@ class TestVirtioUserForContainer(TestCase): """ Run after each test case. """ - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "# ") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "# ") def tear_down_all(self): """ diff --git a/tests/TestSuite_vlan.py b/tests/TestSuite_vlan.py index b2889ae2..d815a85c 100644 --- a/tests/TestSuite_vlan.py +++ b/tests/TestSuite_vlan.py @@ -12,8 +12,8 @@ Test the support of VLAN Offload Features by Poll Mode Drivers. import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase @@ -25,34 +25,34 @@ class TestVlan(TestCase): Vlan Prerequisites """ - global dutRxPortId - global dutTxPortId + global sutRxPortId + global sutTxPortId # Based on h/w type, choose how many ports to use - ports = self.dut.get_ports() + ports = self.sut_node.get_ports() # Verify that enough ports are available self.verify(len(ports) >= 1, "Insufficient ports") - valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] - dutRxPortId = valports[0] - dutTxPortId = valports[0] + valports = [_ for _ in ports if self.tg_node.get_local_port(_) != -1] + sutRxPortId = valports[0] + sutTxPortId = valports[0] portMask = utils.create_mask(valports[:1]) - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.pmdout.start_testpmd( "Default", "--portmask=%s 
--port-topology=loop" % portMask ) - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("set promisc all off", "testpmd> ") - self.dut.send_expect("vlan set filter on %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("vlan set strip off %s" % dutRxPortId, "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("vlan set filter on %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set strip off %s" % sutRxPortId, "testpmd> ") self.vlan = 51 def get_tcpdump_package(self): - pkts = self.tester.load_tcpdump_sniff_packets(self.inst) + pkts = self.tg_node.load_tcpdump_sniff_packets(self.inst) vlans = [] for i in range(len(pkts)): vlan = pkts.strip_element_vlan("vlan", p_index=i) @@ -63,29 +63,29 @@ class TestVlan(TestCase): """ Send $num of packets to portid, if vid is -1, it means send a packet which does not include a vlan id. 
""" - self.pmdout.wait_link_status_up(dutRxPortId) - # The package stream : testTxPort->dutRxPort->dutTxport->testRxPort - port = self.tester.get_local_port(dutRxPortId) - self.txItf = self.tester.get_interface(port) - self.smac = self.tester.get_mac(port) + self.pmdout.wait_link_status_up(sutRxPortId) + # The package stream : testTxPort->sutRxPort->sutTxport->testRxPort + port = self.tg_node.get_local_port(sutRxPortId) + self.txItf = self.tg_node.get_interface(port) + self.smac = self.tg_node.get_mac(port) - port = self.tester.get_local_port(dutTxPortId) - self.rxItf = self.tester.get_interface(port) + port = self.tg_node.get_local_port(sutTxPortId) + self.rxItf = self.tg_node.get_interface(port) - # the packet dest mac must is dut tx port id when the port promisc is off - self.dmac = self.dut.get_mac_address(dutRxPortId) + # the packet dest mac must is SUT tx port id when the port promisc is off + self.dmac = self.sut_node.get_mac_address(sutRxPortId) - self.inst = self.tester.tcpdump_sniff_packets(self.rxItf) + self.inst = self.tg_node.tcpdump_sniff_packets(self.rxItf) # FIXME send a burst with only num packet if vid == -1: - pkt = Packet(pkt_type="UDP") - pkt.config_layer("ether", {"dst": self.dmac, "src": self.smac}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP") + scapy_pkt_builder.config_layer("ether", {"dst": self.dmac, "src": self.smac}) else: - pkt = Packet(pkt_type="VLAN_UDP") - pkt.config_layer("ether", {"dst": self.dmac, "src": self.smac}) - pkt.config_layer("vlan", {"vlan": vid}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP") + scapy_pkt_builder.config_layer("ether", {"dst": self.dmac, "src": self.smac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": vid}) - pkt.send_pkt(self.tester, tx_port=self.txItf, count=4, timeout=30) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.txItf, count=4, timeout=30) def set_up(self): """ @@ -97,12 +97,12 @@ class TestVlan(TestCase): """ Enable receipt of VLAN packets and strip off 
""" - self.dut.send_expect( - "rx_vlan add %d %s" % (self.vlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add %d %s" % (self.vlan, sutRxPortId), "testpmd> " ) - self.dut.send_expect("vlan set strip off %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("start", "testpmd> ", 120) - out = self.dut.send_expect("show port info %s" % dutRxPortId, "testpmd> ", 20) + self.sut_node.send_expect("vlan set strip off %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ", 120) + out = self.sut_node.send_expect("show port info %s" % sutRxPortId, "testpmd> ", 20) self.verify("strip off" in out, "Wrong strip:" + out) self.vlan_send_packet(self.vlan) @@ -115,60 +115,60 @@ class TestVlan(TestCase): self.verify(len(out) == 0, "Received unexpected packet, filter not work!!!") self.verify(notmatch_vlan not in out, "Wrong vlan:" + str(out)) - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") def test_vlan_disable_receipt(self): """ Disable receipt of VLAN packets """ - self.dut.send_expect("rx_vlan rm %d %s" % (self.vlan, dutRxPortId), "testpmd> ") - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("rx_vlan rm %d %s" % (self.vlan, sutRxPortId), "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ", 120) self.vlan_send_packet(self.vlan) out = self.get_tcpdump_package() self.verify(len(out) == 0, "Received unexpected packet, filter not work!!!") self.verify(self.vlan not in out, "Wrong vlan:" + str(out)) - self.dut.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ") def test_vlan_enable_receipt_strip_on(self): """ Enable receipt of VLAN packets and strip on """ - self.dut.send_expect("vlan set strip on %s" % dutRxPortId, "testpmd> ", 20) - self.dut.send_expect( - "rx_vlan add %d %s" % (self.vlan, dutRxPortId), "testpmd> ", 20 + self.sut_node.send_expect("vlan set strip on %s" % sutRxPortId, "testpmd> ", 20) + 
self.sut_node.send_expect( + "rx_vlan add %d %s" % (self.vlan, sutRxPortId), "testpmd> ", 20 ) - out = self.dut.send_expect("show port info %s" % dutRxPortId, "testpmd> ", 20) + out = self.sut_node.send_expect("show port info %s" % sutRxPortId, "testpmd> ", 20) self.verify("strip on" in out, "Wrong strip:" + out) - self.dut.send_expect("start", "testpmd> ", 120) + self.sut_node.send_expect("start", "testpmd> ", 120) self.vlan_send_packet(self.vlan) out = self.get_tcpdump_package() self.verify(len(out), "Forwarded vlan packet not received!!!") self.verify(self.vlan not in out, "Wrong vlan:" + str(out)) - self.dut.send_expect("stop", "testpmd> ", 120) + self.sut_node.send_expect("stop", "testpmd> ", 120) def test_vlan_enable_vlan_insertion(self): """ Enable VLAN header insertion in transmitted packets """ - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect( - "tx_vlan set %s %d" % (dutTxPortId, self.vlan), "testpmd> " + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect( + "tx_vlan set %s %d" % (sutTxPortId, self.vlan), "testpmd> " ) - self.dut.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("port start all", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.vlan_send_packet(-1) out = self.get_tcpdump_package() self.verify(self.vlan in out, "Vlan not found:" + str(out)) - self.dut.send_expect("stop", "testpmd> ") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan reset %s" % dutTxPortId, "testpmd> ", 30) - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("stop", "testpmd> ", 30) + self.sut_node.send_expect("stop", "testpmd> ") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan reset %s" % sutTxPortId, "testpmd> ", 30) + 
self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("stop", "testpmd> ", 30) def tear_down(self): """ @@ -180,5 +180,5 @@ class TestVlan(TestCase): """ Run after each test suite. """ - self.dut.send_expect("quit", "# ", 30) - self.dut.kill_all() + self.sut_node.send_expect("quit", "# ", 30) + self.sut_node.kill_all() diff --git a/tests/TestSuite_vlan_ethertype_config.py b/tests/TestSuite_vlan_ethertype_config.py index 8838310e..782d1dc5 100644 --- a/tests/TestSuite_vlan_ethertype_config.py +++ b/tests/TestSuite_vlan_ethertype_config.py @@ -33,20 +33,20 @@ class TestVlanEthertypeConfig(TestCase): Vlan Prerequisites """ - global dutRxPortId - global dutTxPortId + global sutRxPortId + global sutTxPortId # Based on h/w type, choose how many ports to use - ports = self.dut.get_ports() + ports = self.sut_node.get_ports() # Verify that enough ports are available self.verify(len(ports) >= 2, "Insufficient ports") - valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] - dutRxPortId = valports[0] - dutTxPortId = valports[1] - port = self.tester.get_local_port(dutTxPortId) - self.rxItf = self.tester.get_interface(port) + valports = [_ for _ in ports if self.tg_node.get_local_port(_) != -1] + sutRxPortId = valports[0] + sutTxPortId = valports[1] + port = self.tg_node.get_local_port(sutTxPortId) + self.rxItf = self.tg_node.get_interface(port) self.portmask = utils.create_mask(valports[:2]) @@ -54,25 +54,25 @@ class TestVlanEthertypeConfig(TestCase): """ Run before each test case. 
""" - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) self.pmdout.start_testpmd("Default", "--portmask=%s" % self.portmask) if self.kdriver == "i40e": - self.dut.send_expect("set promisc all off", "testpmd> ") + self.sut_node.send_expect("set promisc all off", "testpmd> ") def start_tcpdump(self, rxItf): - self.tester.alt_session.send_expect( + self.tg_node.alt_session.send_expect( "rm -rf /tmp/getPkgByTcpdump_%s.cap" % rxItf, "#" ) - self.tester.alt_session.send_expect( + self.tg_node.alt_session.send_expect( "tcpdump -i %s -w /tmp/getPkgByTcpdump_%s.cap" % (rxItf, rxItf), "listening on", ) def get_tcpdump_packet(self, rxItf): - recv_pattern = self.tester.alt_session.send_expect("^C", "#") + recv_pattern = self.tg_node.alt_session.send_expect("^C", "#") fmt = '1/1 "%02x"' - out = self.tester.send_expect( + out = self.tg_node.send_expect( "hexdump -ve '%s' '/tmp/getPkgByTcpdump_%s.cap'" % (fmt, rxItf), "# " ) return out @@ -83,22 +83,22 @@ class TestVlanEthertypeConfig(TestCase): """ if vid is -1, it means send pakcage not include vlan id. 
""" - self.pmdout.wait_link_status_up(dutRxPortId) + self.pmdout.wait_link_status_up(sutRxPortId) self.tpid_ori_file = "/tmp/tpid_ori.pcap" self.tpid_new_file = "/tmp/tpid_new.pcap" - self.tester.send_expect("rm -rf /tmp/tpid_ori.pcap", "# ") - self.tester.send_expect("rm -rf /tmp/tpid_new.pcap", "# ") - # The package stream : testTxPort->dutRxPort->dutTxport->testRxPort - port = self.tester.get_local_port(dutRxPortId) - self.txItf = self.tester.get_interface(port) - self.smac = self.tester.get_mac(port) + self.tg_node.send_expect("rm -rf /tmp/tpid_ori.pcap", "# ") + self.tg_node.send_expect("rm -rf /tmp/tpid_new.pcap", "# ") + # The package stream : testTxPort->sutRxPort->sutTxport->testRxPort + port = self.tg_node.get_local_port(sutRxPortId) + self.txItf = self.tg_node.get_interface(port) + self.smac = self.tg_node.get_mac(port) - port = self.tester.get_local_port(dutTxPortId) - self.rxItf = self.tester.get_interface(port) + port = self.tg_node.get_local_port(sutTxPortId) + self.rxItf = self.tg_node.get_interface(port) - # the package dect mac must is dut tx port id when the port promisc is + # the package dect mac must is SUT tx port id when the port promisc is # off - self.dmac = self.dut.get_mac_address(dutRxPortId) + self.dmac = self.sut_node.get_mac_address(sutRxPortId) pkt = [] if outer_vid < 0 or outer_tpid <= 0: @@ -113,7 +113,7 @@ class TestVlanEthertypeConfig(TestCase): ] wrpcap(self.tpid_ori_file, pkt) fmt = '1/1 "%02x"' - out = self.tester.send_expect( + out = self.tg_node.send_expect( "hexdump -ve '%s' '%s'" % (fmt, self.tpid_ori_file), "# " ) if inner_vid < 0 or inner_tpid <= 0: @@ -126,16 +126,16 @@ class TestVlanEthertypeConfig(TestCase): + str("%04x" % inner_vid) ) fmt = '1/1 "%02x"' - out = self.tester.send_expect( + out = self.tg_node.send_expect( "hexdump -ve '%s' '%s' |sed 's/8100000181000002/%s/' |xxd -r -p > '%s'" % (fmt, self.tpid_ori_file, replace, self.tpid_new_file), "# ", ) - self.tester.scapy_foreground() - 
self.tester.scapy_append("pkt=rdpcap('%s')" % self.tpid_new_file) - self.tester.scapy_append("sendp(pkt, iface='%s', count=4)" % self.txItf) - self.tester.scapy_execute() + self.tg_node.scapy_foreground() + self.tg_node.scapy_append("pkt=rdpcap('%s')" % self.tpid_new_file) + self.tg_node.scapy_append("sendp(pkt, iface='%s', count=4)" % self.txItf) + self.tg_node.scapy_execute() def check_vlan_packets(self, vlan, tpid, rxItf, result=True): @@ -154,20 +154,20 @@ class TestVlanEthertypeConfig(TestCase): Test Case 1: change VLAN TPID """ random_vlan = random.randint(1, MAX_VLAN - 1) - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("vlan set filter off %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("vlan set strip on %s" % dutRxPortId, "testpmd> ", 20) + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("vlan set filter off %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set strip on %s" % sutRxPortId, "testpmd> ", 20) rx_vlans = [1, random_vlan, MAX_VLAN] tpids = [0x8100, 0xA100] for tpid in tpids: - self.dut.send_expect( - "vlan set outer tpid 0x%x %s" % (tpid, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "vlan set outer tpid 0x%x %s" % (tpid, sutRxPortId), "testpmd> " ) for rx_vlan in rx_vlans: self.vlan_send_packet(rx_vlan, tpid) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.verify( "RTE_MBUF_F_RX_VLAN" in out, "Vlan recognized error:" + str(out) ) @@ -177,8 +177,8 @@ class TestVlanEthertypeConfig(TestCase): Disable receipt of VLAN packets """ random_vlan = random.randint(1, MAX_VLAN - 1) - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("vlan set strip off %s" % dutRxPortId, "testpmd> ", 20) + 
self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("vlan set strip off %s" % sutRxPortId, "testpmd> ", 20) rx_vlans = [1, random_vlan, MAX_VLAN] # caium_a063 card support only default '0x8100' tpid in rx mode if self.nic in ["cavium_a063", "cavium_a064"]: @@ -186,19 +186,19 @@ class TestVlanEthertypeConfig(TestCase): else: tpids = [0x8100, 0xA100] for tpid in tpids: - self.dut.send_expect( - "vlan set outer tpid 0x%x %s" % (tpid, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "vlan set outer tpid 0x%x %s" % (tpid, sutRxPortId), "testpmd> " ) for rx_vlan in rx_vlans: # test vlan filter on - self.dut.send_expect( - "vlan set filter on %s" % dutRxPortId, "testpmd> " + self.sut_node.send_expect( + "vlan set filter on %s" % sutRxPortId, "testpmd> " ) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") self.check_vlan_packets(rx_vlan, tpid, self.rxItf, False) # test vlan filter off - self.dut.send_expect( - "vlan set filter off %s" % dutRxPortId, "testpmd> " + self.sut_node.send_expect( + "vlan set filter off %s" % sutRxPortId, "testpmd> " ) self.check_vlan_packets(rx_vlan, tpid, self.rxItf) @@ -208,10 +208,10 @@ class TestVlanEthertypeConfig(TestCase): """ random_vlan = random.randint(1, MAX_VLAN - 1) rx_vlans = [1, random_vlan, MAX_VLAN] - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("vlan set filter on %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("vlan set strip off %s" % dutRxPortId, "testpmd> ", 20) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("vlan set filter on %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set strip off %s" % sutRxPortId, "testpmd> ", 20) + self.sut_node.send_expect("start", "testpmd> ") # caium_a063 card support only default '0x8100' tpid in rx mode if self.nic in ["cavium_a063", "cavium_a064"]: @@ -219,21 +219,21 @@ class 
TestVlanEthertypeConfig(TestCase): else: tpids = [0x8100, 0xA100] for tpid in tpids: - self.dut.send_expect( - "vlan set outer tpid 0x%x %s" % (tpid, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "vlan set outer tpid 0x%x %s" % (tpid, sutRxPortId), "testpmd> " ) for rx_vlan in rx_vlans: - self.dut.send_expect( - "rx_vlan add 0x%x %s" % (rx_vlan, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "rx_vlan add 0x%x %s" % (rx_vlan, sutRxPortId), "testpmd> " ) self.check_vlan_packets(rx_vlan, tpid, self.rxItf) - self.dut.send_expect( - "rx_vlan rm 0x%x %d" % (rx_vlan, dutRxPortId), "testpmd> ", 30 + self.sut_node.send_expect( + "rx_vlan rm 0x%x %d" % (rx_vlan, sutRxPortId), "testpmd> ", 30 ) self.check_vlan_packets(rx_vlan, tpid, self.rxItf, False) - self.dut.send_expect("stop", "testpmd> ", 30) + self.sut_node.send_expect("stop", "testpmd> ", 30) def test_vlan_strip(self): """ @@ -241,10 +241,10 @@ class TestVlanEthertypeConfig(TestCase): """ random_vlan = random.randint(1, MAX_VLAN - 1) rx_vlans = [1, random_vlan, MAX_VLAN] - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("vlan set filter off %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("vlan set strip on %s" % dutRxPortId, "testpmd> ", 20) - self.dut.send_expect("start", "testpmd> ", 20) + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("vlan set filter off %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set strip on %s" % sutRxPortId, "testpmd> ", 20) + self.sut_node.send_expect("start", "testpmd> ", 20) # caium_a063 card support only default '0x8100' tpid in rx mode if self.nic in ["cavium_a063", "cavium_a064"]: @@ -252,16 +252,16 @@ class TestVlanEthertypeConfig(TestCase): else: tpids = [0x8100, 0xA100] for tpid in tpids: - self.dut.send_expect( - "vlan set outer tpid 0x%x %s" % (tpid, dutRxPortId), "testpmd> " + self.sut_node.send_expect( + "vlan set outer tpid 0x%x %s" % (tpid, sutRxPortId), "testpmd> " ) for 
rx_vlan in rx_vlans: - self.dut.send_expect( - "vlan set strip on %s" % dutRxPortId, "testpmd> ", 20 + self.sut_node.send_expect( + "vlan set strip on %s" % sutRxPortId, "testpmd> ", 20 ) self.check_vlan_packets(rx_vlan, tpid, self.rxItf, False) - self.dut.send_expect( - "vlan set strip off %s" % dutRxPortId, "testpmd> ", 20 + self.sut_node.send_expect( + "vlan set strip off %s" % sutRxPortId, "testpmd> ", 20 ) self.check_vlan_packets(rx_vlan, tpid, self.rxItf) @@ -271,10 +271,10 @@ class TestVlanEthertypeConfig(TestCase): """ random_vlan = random.randint(1, MAX_VLAN - 1) tx_vlans = [2, random_vlan, MAX_VLAN] - self.dut.send_expect("set fwd mac", "testpmd> ") - self.dut.send_expect("vlan set filter off %s" % dutRxPortId, "testpmd> ") - self.dut.send_expect("vlan set strip off %s" % dutRxPortId, "testpmd> ", 20) - self.dut.send_expect("start", "testpmd> ") + self.sut_node.send_expect("set fwd mac", "testpmd> ") + self.sut_node.send_expect("vlan set filter off %s" % sutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set strip off %s" % sutRxPortId, "testpmd> ", 20) + self.sut_node.send_expect("start", "testpmd> ") # caium_a063 card support only default '0x8100' tpid in rx mode if self.nic in ["cavium_a063", "cavium_a064", "IGC-I225_LM"]: @@ -282,17 +282,17 @@ class TestVlanEthertypeConfig(TestCase): else: tpids = [0x8100, 0xA100] for tpid in tpids: - self.dut.send_expect( - "vlan set outer tpid 0x%x %s" % (tpid, dutTxPortId), "testpmd> " + self.sut_node.send_expect( + "vlan set outer tpid 0x%x %s" % (tpid, sutTxPortId), "testpmd> " ) for tx_vlan in tx_vlans: - self.dut.send_expect("stop", "testpmd>") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect( - "tx_vlan set %s 0x%x" % (dutTxPortId, tx_vlan), "testpmd> " + self.sut_node.send_expect("stop", "testpmd>") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect( + "tx_vlan set %s 0x%x" % (sutTxPortId, tx_vlan), "testpmd> " ) - 
self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("start", "testpmd>") self.start_tcpdump(self.rxItf) self.vlan_send_packet(-1) out = self.get_tcpdump_packet(self.rxItf) @@ -300,11 +300,11 @@ class TestVlanEthertypeConfig(TestCase): self.verify(vlan_string in out, "Wrong vlan:" + str(out)) self.verify(str("%x" % tpid) in out, "Wrong vlan:" + str(out)) self.verify(str("%x" % tx_vlan) in out, "Vlan not found:" + str(out)) - self.dut.send_expect("stop", "testpmd>") - self.dut.send_expect("port stop all", "testpmd> ") - self.dut.send_expect("tx_vlan reset %s" % dutTxPortId, "testpmd> ", 30) - self.dut.send_expect("port start all", "testpmd> ") - self.dut.send_expect("start", "testpmd>") + self.sut_node.send_expect("stop", "testpmd>") + self.sut_node.send_expect("port stop all", "testpmd> ") + self.sut_node.send_expect("tx_vlan reset %s" % sutTxPortId, "testpmd> ", 30) + self.sut_node.send_expect("port start all", "testpmd> ") + self.sut_node.send_expect("start", "testpmd>") self.start_tcpdump(self.rxItf) self.vlan_send_packet(-1) out = self.get_tcpdump_packet(self.rxItf) @@ -331,20 +331,20 @@ class TestVlanEthertypeConfig(TestCase): random_vlan = random.randint(1, MAX_VLAN - 1) rx_vlans = [1, random_vlan, MAX_VLAN] - self.dut.send_expect("vlan set extend on %d" % dutRxPortId, "testpmd> ", 20) - self.dut.send_expect("set verbose 1", "testpmd> ") - self.dut.send_expect("set fwd rxonly", "testpmd> ") - self.dut.send_expect("start", "testpmd> ") - self.dut.send_expect("vlan set filter off %s" % dutRxPortId, "testpmd> ") + self.sut_node.send_expect("vlan set extend on %d" % sutRxPortId, "testpmd> ", 20) + self.sut_node.send_expect("set verbose 1", "testpmd> ") + self.sut_node.send_expect("set fwd rxonly", "testpmd> ") + self.sut_node.send_expect("start", "testpmd> ") + self.sut_node.send_expect("vlan set filter off %s" % sutRxPortId, "testpmd> 
") tpids = [0x8100, 0xA100, 0x88A8, 0x9100] for outer_tpid in tpids: for inner_tpid in tpids: - self.dut.send_expect( - "vlan set outer tpid 0x%x %s" % (outer_tpid, dutRxPortId), + self.sut_node.send_expect( + "vlan set outer tpid 0x%x %s" % (outer_tpid, sutRxPortId), "testpmd> ", ) - self.dut.send_expect( - "vlan set inner tpid 0x%x %s" % (inner_tpid, dutRxPortId), + self.sut_node.send_expect( + "vlan set inner tpid 0x%x %s" % (inner_tpid, sutRxPortId), "testpmd> ", ) for outer_vlan in rx_vlans: @@ -352,19 +352,19 @@ class TestVlanEthertypeConfig(TestCase): self.vlan_send_packet( outer_vlan, outer_tpid, inner_vlan, inner_tpid ) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.verify("QinQ VLAN" in out, "Wrong QinQ:" + str(out)) def tear_down(self): """ Run after each test case. """ - self.dut.send_expect("stop", "testpmd> ", 30) - self.dut.send_expect("quit", "# ", 30) + self.sut_node.send_expect("stop", "testpmd> ", 30) + self.sut_node.send_expect("quit", "# ", 30) pass def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.kill_all() + self.sut_node.kill_all() diff --git a/tests/TestSuite_vm2vm_virtio_net_perf.py b/tests/TestSuite_vm2vm_virtio_net_perf.py index ac18105e..32246d54 100644 --- a/tests/TestSuite_vm2vm_virtio_net_perf.py +++ b/tests/TestSuite_vm2vm_virtio_net_perf.py @@ -24,10 +24,10 @@ from framework.virt_common import VM class TestVM2VMVirtioNetPerf(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) core_config = "1S/5C/1T" - self.cores_list = self.dut.get_core_list(core_config, socket=self.ports_socket) + self.cores_list = self.sut_node.get_core_list(core_config, socket=self.ports_socket) self.verify( len(self.cores_list) >= 4, "There has not enough cores to test this suite %s" % self.suite_name, @@ -37,26 +37,26 @@ class TestVM2VMVirtioNetPerf(TestCase): self.virtio_ip2 = "1.1.1.3" self.virtio_mac1 = "52:54:00:00:00:01" self.virtio_mac2 = "52:54:00:00:00:02" - self.base_dir = self.dut.base_dir.replace("~", "/root") + self.base_dir = self.sut_node.base_dir.replace("~", "/root") self.random_string = string.ascii_letters + string.digits - socket_num = len(set([int(core["socket"]) for core in self.dut.cores])) + socket_num = len(set([int(core["socket"]) for core in self.sut_node.cores])) self.socket_mem = ",".join(["2048"] * socket_num) - self.vhost = self.dut.new_session(suite="vhost") - self.pmd_vhost = PmdOutput(self.dut, self.vhost) - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.vhost = self.sut_node.new_session(suite="vhost") + self.pmd_vhost = PmdOutput(self.sut_node, self.vhost) + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] # get cbdma device self.cbdma_dev_infos = [] self.dmas_info = None self.device_str = None self.checked_vm = False - self.dut.restore_interfaces() + self.sut_node.restore_interfaces() def set_up(self): """ 
run before each test case. """ - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.vm_dut = [] + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.vm_sut = [] self.vm = [] def get_cbdma_ports_info_and_bind_to_dpdk( @@ -65,7 +65,7 @@ class TestVM2VMVirtioNetPerf(TestCase): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -100,7 +100,7 @@ class TestVM2VMVirtioNetPerf(TestCase): dmas_info += dmas self.dmas_info = dmas_info[:-1] self.device_str = " ".join(used_cbdma) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -109,11 +109,11 @@ class TestVM2VMVirtioNetPerf(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -122,7 +122,7 @@ class TestVM2VMVirtioNetPerf(TestCase): @property def check_2m_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -190,7 +190,7 @@ class TestVM2VMVirtioNetPerf(TestCase): cbdma_arg_0, ) - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.cores_list, prefix="vhost", no_pci=no_pci ) if rxq_txq is None: @@ -215,8 +215,8 @@ class TestVM2VMVirtioNetPerf(TestCase): start two VM, each VM has one virtio device """ for i in range(self.vm_num): - vm_dut = None - vm_info = VM(self.dut, "vm%d" % i, 
vm_config) + vm_sut = None + vm_info = VM(self.sut_node, "vm%d" % i, vm_config) vm_params = {} vm_params["driver"] = "vhost-user" if not server_mode: @@ -229,39 +229,39 @@ class TestVM2VMVirtioNetPerf(TestCase): vm_params["opt_settings"] = self.vm_args vm_info.set_vm_device(**vm_params) try: - vm_dut = vm_info.start(set_target=False) - if vm_dut is None: + vm_sut = vm_info.start(set_target=False) + if vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print(utils.RED("Failure for %s" % str(e))) - self.verify(vm_dut is not None, "start vm failed") - self.vm_dut.append(vm_dut) + self.verify(vm_sut is not None, "start vm failed") + self.vm_sut.append(vm_sut) self.vm.append(vm_info) def config_vm_env(self, combined=False, rxq_txq=1): """ set virtio device IP and run arp protocal """ - vm1_intf = self.vm_dut[0].ports_info[0]["intf"] - vm2_intf = self.vm_dut[1].ports_info[0]["intf"] + vm1_intf = self.vm_sut[0].ports_info[0]["intf"] + vm2_intf = self.vm_sut[1].ports_info[0]["intf"] if combined: - self.vm_dut[0].send_expect( + self.vm_sut[0].send_expect( "ethtool -L %s combined %d" % (vm1_intf, rxq_txq), "#", 10 ) - self.vm_dut[0].send_expect( + self.vm_sut[0].send_expect( "ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10 ) if combined: - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "ethtool -L %s combined %d" % (vm2_intf, rxq_txq), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "ifconfig %s %s" % (vm2_intf, self.virtio_ip2), "#", 10 ) - self.vm_dut[0].send_expect( + self.vm_sut[0].send_expect( "arp -s %s %s" % (self.virtio_ip2, self.virtio_mac2), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "arp -s %s %s" % (self.virtio_ip1, self.virtio_mac1), "#", 10 ) @@ -307,8 +307,8 @@ class TestVM2VMVirtioNetPerf(TestCase): elif iperf_mode == "ufo": iperf_server = "iperf -s -u -i 1" iperf_client = "iperf -c 1.1.1.2 -i 1 -t 30 -P 4 -u -b 1G -l 9000" - self.vm_dut[0].send_expect("%s > 
iperf_server.log &" % iperf_server, "", 10) - self.vm_dut[1].send_expect("%s > iperf_client.log &" % iperf_client, "", 60) + self.vm_sut[0].send_expect("%s > iperf_server.log &" % iperf_server, "", 10) + self.vm_sut[1].send_expect("%s > iperf_client.log &" % iperf_client, "", 60) time.sleep(90) def get_perf_result(self): @@ -317,8 +317,8 @@ class TestVM2VMVirtioNetPerf(TestCase): """ self.table_header = ["Mode", "[M|G]bits/sec"] self.result_table_create(self.table_header) - self.vm_dut[0].send_expect("pkill iperf", "# ") - self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir) + self.vm_sut[0].send_expect("pkill iperf", "# ") + self.vm_sut[1].session.copy_file_from("%s/iperf_client.log" % self.sut_node.base_dir) fp = open("./iperf_client.log") fmsg = fp.read() fp.close() @@ -339,11 +339,11 @@ class TestVM2VMVirtioNetPerf(TestCase): results_row = ["vm2vm", iperfdata[-1]] self.result_table_add(results_row) - # print iperf resut + # print iperf result self.result_table_print() # rm the iperf log file in vm - self.vm_dut[0].send_expect("rm iperf_server.log", "#", 10) - self.vm_dut[1].send_expect("rm iperf_client.log", "#", 10) + self.vm_sut[0].send_expect("rm iperf_server.log", "#", 10) + self.vm_sut[1].send_expect("rm iperf_client.log", "#", 10) return float(iperfdata[-1].split()[0]) def verify_xstats_info_on_vhost(self): @@ -415,17 +415,17 @@ class TestVM2VMVirtioNetPerf(TestCase): data = "" for char in range(file_size * 1024): data += random.choice(self.random_string) - self.vm_dut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ") + self.vm_sut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ") # scp this file to vm1 - out = self.vm_dut[1].send_command( + out = self.vm_sut[1].send_command( "scp root@%s:/tmp/payload /root" % self.virtio_ip1, timeout=5 ) if "Are you sure you want to continue connecting" in out: - self.vm_dut[1].send_command("yes", timeout=3) - self.vm_dut[1].send_command(self.vm[0].password, timeout=3) + 
self.vm_sut[1].send_command("yes", timeout=3) + self.vm_sut[1].send_command(self.vm[0].password, timeout=3) # get the file info in vm1, and check it valid - md5_send = self.vm_dut[0].send_expect("md5sum /tmp/payload", "# ") - md5_revd = self.vm_dut[1].send_expect("md5sum /root/payload", "# ") + md5_send = self.vm_sut[0].send_expect("md5sum /tmp/payload", "# ") + md5_revd = self.vm_sut[1].send_expect("md5sum /root/payload", "# ") md5_send = md5_send[: md5_send.find(" ")] md5_revd = md5_revd[: md5_revd.find(" ")] self.verify( @@ -509,8 +509,8 @@ class TestVM2VMVirtioNetPerf(TestCase): rxq_txq=None, ) self.start_vms() - self.offload_capbility_check(self.vm_dut[0]) - self.offload_capbility_check(self.vm_dut[1]) + self.offload_capbility_check(self.vm_sut[0]) + self.offload_capbility_check(self.vm_sut[1]) def test_vm2vm_split_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue( self, @@ -808,8 +808,8 @@ class TestVM2VMVirtioNetPerf(TestCase): rxq_txq=None, ) self.start_vms() - self.offload_capbility_check(self.vm_dut[0]) - self.offload_capbility_check(self.vm_dut[1]) + self.offload_capbility_check(self.vm_sut[0]) + self.offload_capbility_check(self.vm_sut[1]) def test_vm2vm_packed_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue( self, @@ -956,13 +956,13 @@ class TestVM2VMVirtioNetPerf(TestCase): run after each test case. """ self.stop_all_apps() - self.dut.kill_all() + self.sut_node.kill_all() self.bind_cbdma_device_to_kernel() def tear_down_all(self): """ Run after each test suite. 
""" - self.bind_nic_driver(self.dut_ports, self.drivername) + self.bind_nic_driver(self.sut_ports, self.drivername) if getattr(self, "vhost", None): - self.dut.close_session(self.vhost) + self.sut_node.close_session(self.vhost) diff --git a/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py b/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py index 8dad7be5..ee9df0c5 100644 --- a/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py +++ b/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py @@ -24,29 +24,29 @@ from framework.virt_common import VM class TestVM2VMVirtioNetPerfCbdma(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores_list = self.dut.get_core_list(config="all", socket=self.ports_socket) + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores_list = self.sut_node.get_core_list(config="all", socket=self.ports_socket) self.vhost_core_list = self.cores_list[0:9] self.vm_num = 2 self.virtio_ip1 = "1.1.1.1" self.virtio_ip2 = "1.1.1.2" self.virtio_mac1 = "52:54:00:00:00:01" self.virtio_mac2 = "52:54:00:00:00:02" - self.base_dir = self.dut.base_dir.replace("~", "/root") + self.base_dir = self.sut_node.base_dir.replace("~", "/root") self.random_string = string.ascii_letters + string.digits - socket_num = len(set([int(core["socket"]) for core in self.dut.cores])) + socket_num = len(set([int(core["socket"]) for core in self.sut_node.cores])) self.socket_mem = ",".join(["2048"] * socket_num) - self.vhost = self.dut.new_session(suite="vhost") - self.pmdout_vhost_user = PmdOutput(self.dut, self.vhost) - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.vhost = self.sut_node.new_session(suite="vhost") + self.pmdout_vhost_user = PmdOutput(self.sut_node, self.vhost) + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] def set_up(self): """ run before each test case. 
""" - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.vm_dut = [] + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.vm_sut = [] self.vm = [] def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False): @@ -56,7 +56,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -80,7 +80,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -115,11 +115,11 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): return lcore_dma_param def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -127,7 +127,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -147,8 +147,8 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): start two VM, each VM has one virtio device """ for i in range(self.vm_num): - vm_dut = None - vm_info = VM(self.dut, "vm%d" % i, vm_config) + vm_sut = None + vm_info = VM(self.sut_node, "vm%d" % i, vm_config) vm_params = {} vm_params["driver"] = "vhost-user" if not server_mode: @@ -160,31 +160,31 @@ class 
TestVM2VMVirtioNetPerfCbdma(TestCase): vm_params["opt_settings"] = self.vm_args vm_info.set_vm_device(**vm_params) try: - vm_dut = vm_info.start(set_target=False) - if vm_dut is None: + vm_sut = vm_info.start(set_target=False) + if vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print(utils.RED("Failure for %s" % str(e))) - self.verify(vm_dut is not None, "start vm failed") - self.vm_dut.append(vm_dut) + self.verify(vm_sut is not None, "start vm failed") + self.vm_sut.append(vm_sut) self.vm.append(vm_info) def config_vm_ip(self): """ set virtio device IP and run arp protocal """ - vm1_intf = self.vm_dut[0].ports_info[0]["intf"] - vm2_intf = self.vm_dut[1].ports_info[0]["intf"] - self.vm_dut[0].send_expect( + vm1_intf = self.vm_sut[0].ports_info[0]["intf"] + vm2_intf = self.vm_sut[1].ports_info[0]["intf"] + self.vm_sut[0].send_expect( "ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "ifconfig %s %s" % (vm2_intf, self.virtio_ip2), "#", 10 ) - self.vm_dut[0].send_expect( + self.vm_sut[0].send_expect( "arp -s %s %s" % (self.virtio_ip2, self.virtio_mac2), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "arp -s %s %s" % (self.virtio_ip1, self.virtio_mac1), "#", 10 ) @@ -192,17 +192,17 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): """ set virtio device combined """ - vm1_intf = self.vm_dut[0].ports_info[0]["intf"] - vm2_intf = self.vm_dut[1].ports_info[0]["intf"] - self.vm_dut[0].send_expect( + vm1_intf = self.vm_sut[0].ports_info[0]["intf"] + vm2_intf = self.vm_sut[1].ports_info[0]["intf"] + self.vm_sut[0].send_expect( "ethtool -L %s combined %d" % (vm1_intf, combined), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "ethtool -L %s combined %d" % (vm2_intf, combined), "#", 10 ) def check_ping_between_vms(self): - ping_out = self.vm_dut[0].send_expect( + ping_out = self.vm_sut[0].send_expect( "ping {} -c 
4".format(self.virtio_ip2), "#", 20 ) self.logger.info(ping_out) @@ -215,8 +215,8 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): server = "iperf -s -i 1" client = "iperf -c {} -i 1 -t 60".format(self.virtio_ip1) - self.vm_dut[0].send_expect("{} > iperf_server.log &".format(server), "", 10) - self.vm_dut[1].send_expect("{} > iperf_client.log &".format(client), "", 10) + self.vm_sut[0].send_expect("{} > iperf_server.log &".format(server), "", 10) + self.vm_sut[1].send_expect("{} > iperf_client.log &".format(client), "", 10) time.sleep(60) def get_perf_result(self): @@ -225,8 +225,8 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): """ self.table_header = ["Mode", "[M|G]bits/sec"] self.result_table_create(self.table_header) - self.vm_dut[0].send_expect("pkill iperf", "# ") - self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir) + self.vm_sut[0].send_expect("pkill iperf", "# ") + self.vm_sut[1].session.copy_file_from("%s/iperf_client.log" % self.sut_node.base_dir) fp = open("./iperf_client.log") fmsg = fp.read() fp.close() @@ -246,8 +246,8 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): # print iperf resut self.result_table_print() # rm the iperf log file in vm - self.vm_dut[0].send_expect("rm iperf_server.log", "#", 10) - self.vm_dut[1].send_expect("rm iperf_client.log", "#", 10) + self.vm_sut[0].send_expect("rm iperf_server.log", "#", 10) + self.vm_sut[1].send_expect("rm iperf_client.log", "#", 10) def verify_xstats_info_on_vhost(self): """ @@ -304,17 +304,17 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): data = "" for char in range(file_size * 1024): data += random.choice(self.random_string) - self.vm_dut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ") + self.vm_sut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ") # scp this file to vm1 - out = self.vm_dut[1].send_command( + out = self.vm_sut[1].send_command( "scp root@%s:/tmp/payload /root" % self.virtio_ip1, timeout=5 ) if "Are you sure you want to continue connecting" in 
out: - self.vm_dut[1].send_command("yes", timeout=3) - self.vm_dut[1].send_command(self.vm[0].password, timeout=3) + self.vm_sut[1].send_command("yes", timeout=3) + self.vm_sut[1].send_command(self.vm[0].password, timeout=3) # get the file info in vm1, and check it valid - md5_send = self.vm_dut[0].send_expect("md5sum /tmp/payload", "# ") - md5_revd = self.vm_dut[1].send_expect("md5sum /root/payload", "# ") + md5_send = self.vm_sut[0].send_expect("md5sum /tmp/payload", "# ") + md5_revd = self.vm_sut[1].send_expect("md5sum /root/payload", "# ") md5_send = md5_send[: md5_send.find(" ")] md5_revd = md5_revd[: md5_revd.find(" ")] self.verify( @@ -971,11 +971,11 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase): run after each test case. """ self.stop_all_apps() - self.dut.kill_all() + self.sut_node.kill_all() self.bind_cbdma_device_to_kernel() def tear_down_all(self): """ Run after each test suite. """ - self.dut.close_session(self.vhost) + self.sut_node.close_session(self.vhost) diff --git a/tests/TestSuite_vm2vm_virtio_pmd.py b/tests/TestSuite_vm2vm_virtio_pmd.py index 70d39afc..96b22f9e 100644 --- a/tests/TestSuite_vm2vm_virtio_pmd.py +++ b/tests/TestSuite_vm2vm_virtio_pmd.py @@ -15,30 +15,30 @@ import re import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase from framework.virt_common import VM class TestVM2VMVirtioPMD(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports() - self.bind_nic_driver(self.dut_ports) - self.memory_channel = self.dut.get_memory_channels() + self.sut_ports = self.sut_node.get_ports() + self.bind_nic_driver(self.sut_ports) + self.memory_channel = self.sut_node.get_memory_channels() self.vm_num = 2 self.dump_pcap = "/root/pdump-rx.pcap" - socket_num = len(set([int(core["socket"]) for core in self.dut.cores])) + socket_num = len(set([int(core["socket"]) for 
core in self.sut_node.cores])) self.socket_mem = ",".join(["1024"] * socket_num) - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.vhost_user = self.dut.new_session(suite="vhost") + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.vhost_user = self.sut_node.new_session(suite="vhost") self.virtio_user0 = None self.virtio_user1 = None - self.pci_info = self.dut.ports_info[0]["pci"] - self.app_testpmd_path = self.dut.apps_name["test-pmd"] - self.app_pdump = self.dut.apps_name["pdump"] + self.pci_info = self.sut_node.ports_info[0]["pci"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] + self.app_pdump = self.sut_node.apps_name["pdump"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] - self.pmd_vhost = PmdOutput(self.dut, self.vhost_user) + self.pmd_vhost = PmdOutput(self.sut_node, self.vhost_user) self.cbdma_dev_infos = [] self.vm_config = "vhost_sample" self.device_str = " " @@ -55,10 +55,10 @@ class TestVM2VMVirtioPMD(TestCase): "Path", ] self.result_table_create(self.table_header) - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.vm_dut = [] + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.vm_sut = [] self.vm = [] def get_core_list(self, cores_num): @@ -66,7 +66,7 @@ class TestVM2VMVirtioPMD(TestCase): create core mask """ self.core_config = "1S/%dC/1T" % cores_num - self.cores_list = self.dut.get_core_list(self.core_config) + self.cores_list = self.sut_node.get_core_list(self.core_config) self.verify( len(self.cores_list) >= cores_num, "There has not enough cores to test this case %s" % self.running_case, @@ -80,7 +80,7 @@ class TestVM2VMVirtioPMD(TestCase): testcmd = 
self.app_testpmd_path + " " vdev1 = "--vdev 'net_vhost0,iface=%s/vhost-net0,queues=1' " % self.base_dir vdev2 = "--vdev 'net_vhost1,iface=%s/vhost-net1,queues=1' " % self.base_dir - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=vhost_mask, no_pci=True, prefix="vhost" ) para = " -- -i --nb-cores=1 --txd=1024 --rxd=1024" @@ -91,7 +91,7 @@ class TestVM2VMVirtioPMD(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -100,14 +100,14 @@ class TestVM2VMVirtioPMD(TestCase): """ launch the testpmd as virtio with vhost_net1 """ - self.virtio_user1 = self.dut.new_session(suite="virtio_user1") + self.virtio_user1 = self.sut_node.new_session(suite="virtio_user1") virtio_mask = self.cores_list[2:4] testcmd = self.app_testpmd_path + " " vdev = ( "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=1,%s " % path_mode ) - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=virtio_mask, no_pci=True, prefix="virtio", ports=[self.pci_info] ) if self.check_2M_env: @@ -122,14 +122,14 @@ class TestVM2VMVirtioPMD(TestCase): """ launch the testpmd as virtio with vhost_net0 """ - self.virtio_user0 = self.dut.new_session(suite="virtio_user0") + self.virtio_user0 = self.sut_node.new_session(suite="virtio_user0") virtio_mask = self.cores_list[4:6] testcmd = self.app_testpmd_path + " " vdev = ( "--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=1,%s " % path_mode ) - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=virtio_mask, no_pci=True, prefix="virtio0", ports=[self.pci_info] ) if self.check_2M_env: @@ -178,12 +178,12 @@ class TestVM2VMVirtioPMD(TestCase): command = command + "--txd=1024 --rxd=1024 %s" 
vm_client.send_expect(command % (w_pci_str, extern_param), "testpmd> ", 20) - def launch_pdump_to_capture_pkt(self, client_dut, dump_port): + def launch_pdump_to_capture_pkt(self, client_sut, dump_port): """ bootup pdump in VM """ - self.pdump_session = client_dut.new_session(suite="pdump") - if hasattr(client_dut, "vm_name"): + self.pdump_session = client_sut.new_session(suite="pdump") + if hasattr(client_sut, "vm_name"): command_line = ( self.app_pdump + " " @@ -201,7 +201,7 @@ class TestVM2VMVirtioPMD(TestCase): + "--pdump '%s,queue=*,rx-dev=%s,mbuf-size=8000'" ) self.pdump_session.send_expect( - command_line % (self.dut.prefix_subfix, dump_port, self.dump_pcap), + command_line % (self.sut_node.prefix_subfix, dump_port, self.dump_pcap), "Port", ) @@ -223,8 +223,8 @@ class TestVM2VMVirtioPMD(TestCase): vm_params["opt_queue"] = opt_queue for i in range(self.vm_num): - vm_dut = None - vm_info = VM(self.dut, "vm%d" % i, vm_config) + vm_sut = None + vm_info = VM(self.sut_node, "vm%d" % i, vm_config) vm_params["driver"] = "vhost-user" if not server_mode: @@ -236,13 +236,13 @@ class TestVM2VMVirtioPMD(TestCase): vm_info.set_vm_device(**vm_params) time.sleep(3) try: - vm_dut = vm_info.start() - if vm_dut is None: + vm_sut = vm_info.start() + if vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print((utils.RED("Failure for %s" % str(e)))) raise e - self.vm_dut.append(vm_dut) + self.vm_sut.append(vm_sut) self.vm.append(vm_info) def calculate_avg_throughput(self): @@ -271,29 +271,29 @@ class TestVM2VMVirtioPMD(TestCase): start to send packets and verify it """ # start to send packets - self.vm_dut[0].send_expect("set fwd rxonly", "testpmd> ", 10) - self.vm_dut[0].send_command("start", 3) - self.vm_dut[1].send_expect("set fwd txonly", "testpmd> ", 10) - self.vm_dut[1].send_expect("set txpkts 64", "testpmd> ", 10) - self.vm_dut[1].send_expect("start tx_first 32", "testpmd> ", 10) + self.vm_sut[0].send_expect("set fwd rxonly", "testpmd> ", 10) + 
self.vm_sut[0].send_command("start", 3) + self.vm_sut[1].send_expect("set fwd txonly", "testpmd> ", 10) + self.vm_sut[1].send_expect("set txpkts 64", "testpmd> ", 10) + self.vm_sut[1].send_expect("start tx_first 32", "testpmd> ", 10) Mpps = self.calculate_avg_throughput() self.update_table_info(mode, 64, Mpps, path) self.result_table_print() - def check_packet_payload_valid(self, client_dut): + def check_packet_payload_valid(self, client_sut): """ check the payload is valid """ # stop pdump self.pdump_session.send_expect("^c", "# ", 60) # quit testpmd - client_dut.send_expect("quit", "#", 60) + client_sut.send_expect("quit", "#", 60) time.sleep(2) - client_dut.session.copy_file_from( + client_sut.session.copy_file_from( src="%s" % self.dump_pcap, dst="%s" % self.dump_pcap ) - pkt = Packet() - pkts = pkt.read_pcapfile(self.dump_pcap) + scapy_pkt_builder = ScapyPacketBuilder() + pkts = scapy_pkt_builder.read_pcapfile(self.dump_pcap) self.verify(len(pkts) == 10, "The vm0 do not capture all the packets") data = str(pkts[0]["Raw"]) for i in range(1, 10): @@ -304,16 +304,16 @@ class TestVM2VMVirtioPMD(TestCase): def stop_all_apps(self): for i in range(len(self.vm)): - self.vm_dut[i].send_expect("quit", "#", 20) + self.vm_sut[i].send_expect("quit", "#", 20) self.vm[i].stop() self.vhost_user.send_expect("quit", "#", 30) if self.virtio_user1: self.virtio_user1.send_expect("quit", "# ", 30) - self.dut.close_session(self.virtio_user1) + self.sut_node.close_session(self.virtio_user1) self.virtio_user1 = None if self.virtio_user0: self.virtio_user0.send_expect("quit", "# ", 30) - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.virtio_user0) self.virtio_user0 = None def test_vhost_vm2vm_virtio_pmd_with_normal_path(self): @@ -325,8 +325,8 @@ class TestVM2VMVirtioPMD(TestCase): self.get_core_list(2) self.start_vhost_testpmd() self.start_vms(setting_args=setting_args) - self.start_vm_testpmd(self.vm_dut[0], path_mode) - 
self.start_vm_testpmd(self.vm_dut[1], path_mode) + self.start_vm_testpmd(self.vm_sut[0], path_mode) + self.start_vm_testpmd(self.vm_sut[1], path_mode) self.send_and_verify(mode="virtio 0.95 normal path", path=path_mode) def test_vhost_vm2vm_virito_10_pmd_with_normal_path(self): @@ -338,8 +338,8 @@ class TestVM2VMVirtioPMD(TestCase): self.get_core_list(2) self.start_vhost_testpmd() self.start_vms(setting_args=setting_args) - self.start_vm_testpmd(self.vm_dut[0], path_mode) - self.start_vm_testpmd(self.vm_dut[1], path_mode) + self.start_vm_testpmd(self.vm_sut[0], path_mode) + self.start_vm_testpmd(self.vm_sut[1], path_mode) self.send_and_verify(mode="virtio 1.0 normal path", path=path_mode) def test_vhost_vm2vm_virtio_pmd_with_vector_rx_path(self): @@ -352,14 +352,14 @@ class TestVM2VMVirtioPMD(TestCase): self.start_vhost_testpmd() self.start_vms(setting_args=setting_args) self.start_vm_testpmd( - self.vm_dut[0], + self.vm_sut[0], path_mode, - virtio_net_pci=self.vm_dut[0].ports_info[0]["pci"], + virtio_net_pci=self.vm_sut[0].ports_info[0]["pci"], ) self.start_vm_testpmd( - self.vm_dut[1], + self.vm_sut[1], path_mode, - virtio_net_pci=self.vm_dut[1].ports_info[0]["pci"], + virtio_net_pci=self.vm_sut[1].ports_info[0]["pci"], ) self.send_and_verify(mode="virtio 0.95 vector_rx", path=path_mode) @@ -373,14 +373,14 @@ class TestVM2VMVirtioPMD(TestCase): self.start_vhost_testpmd() self.start_vms(setting_args=setting_args) self.start_vm_testpmd( - self.vm_dut[0], + self.vm_sut[0], path_mode, - virtio_net_pci=self.vm_dut[0].ports_info[0]["pci"], + virtio_net_pci=self.vm_sut[0].ports_info[0]["pci"], ) self.start_vm_testpmd( - self.vm_dut[1], + self.vm_sut[1], path_mode, - virtio_net_pci=self.vm_dut[1].ports_info[0]["pci"], + virtio_net_pci=self.vm_sut[1].ports_info[0]["pci"], ) self.send_and_verify(mode="virtio 1.0 vector_rx", path=path_mode) @@ -396,19 +396,19 @@ class TestVM2VMVirtioPMD(TestCase): self.start_vhost_testpmd() self.start_vms(setting_args=setting_args) # git 
the vm enough huge to run pdump - self.vm_dut[0].set_huge_pages(2048) + self.vm_sut[0].set_huge_pages(2048) # start testpmd and pdump in VM0 - self.start_vm_testpmd(self.vm_dut[0], path_mode, extern_param) - self.vm_dut[0].send_expect("set fwd rxonly", "testpmd> ", 30) - self.vm_dut[0].send_expect("start", "testpmd> ", 30) - self.launch_pdump_to_capture_pkt(self.vm_dut[0], dump_port) + self.start_vm_testpmd(self.vm_sut[0], path_mode, extern_param) + self.vm_sut[0].send_expect("set fwd rxonly", "testpmd> ", 30) + self.vm_sut[0].send_expect("start", "testpmd> ", 30) + self.launch_pdump_to_capture_pkt(self.vm_sut[0], dump_port) # start testpmd in VM1 and start to send packet - self.start_vm_testpmd(self.vm_dut[1], path_mode, extern_param) - self.vm_dut[1].send_expect("set txpkts 2000,2000,2000,2000", "testpmd> ", 30) - self.vm_dut[1].send_expect("set burst 1", "testpmd> ", 30) - self.vm_dut[1].send_expect("start tx_first 10", "testpmd> ", 30) + self.start_vm_testpmd(self.vm_sut[1], path_mode, extern_param) + self.vm_sut[1].send_expect("set txpkts 2000,2000,2000,2000", "testpmd> ", 30) + self.vm_sut[1].send_expect("set burst 1", "testpmd> ", 30) + self.vm_sut[1].send_expect("start tx_first 10", "testpmd> ", 30) # check the packet in vm0 - self.check_packet_payload_valid(self.vm_dut[0]) + self.check_packet_payload_valid(self.vm_sut[0]) def test_vhost_vm2vm_virito_10_pmd_with_mergeable_path(self): """ @@ -422,19 +422,19 @@ class TestVM2VMVirtioPMD(TestCase): self.start_vhost_testpmd() self.start_vms(setting_args=setting_args) # git the vm enough huge to run pdump - self.vm_dut[0].set_huge_pages(2048) + self.vm_sut[0].set_huge_pages(2048) # start testpmd and pdump in VM0 - self.start_vm_testpmd(self.vm_dut[0], path_mode, extern_param) - self.vm_dut[0].send_expect("set fwd rxonly", "testpmd> ", 30) - self.vm_dut[0].send_expect("start", "testpmd> ", 30) - self.launch_pdump_to_capture_pkt(self.vm_dut[0], dump_port) + self.start_vm_testpmd(self.vm_sut[0], path_mode, 
extern_param) + self.vm_sut[0].send_expect("set fwd rxonly", "testpmd> ", 30) + self.vm_sut[0].send_expect("start", "testpmd> ", 30) + self.launch_pdump_to_capture_pkt(self.vm_sut[0], dump_port) # start testpmd in VM1 and start to send packet - self.start_vm_testpmd(self.vm_dut[1], path_mode, extern_param) - self.vm_dut[1].send_expect("set txpkts 2000,2000,2000,2000", "testpmd> ", 30) - self.vm_dut[1].send_expect("set burst 1", "testpmd> ", 30) - self.vm_dut[1].send_expect("start tx_first 10", "testpmd> ", 30) + self.start_vm_testpmd(self.vm_sut[1], path_mode, extern_param) + self.vm_sut[1].send_expect("set txpkts 2000,2000,2000,2000", "testpmd> ", 30) + self.vm_sut[1].send_expect("set burst 1", "testpmd> ", 30) + self.vm_sut[1].send_expect("start tx_first 10", "testpmd> ", 30) # check the packet in vm0 - self.check_packet_payload_valid(self.vm_dut[0]) + self.check_packet_payload_valid(self.vm_sut[0]) def test_vhost_vm2vm_virito_11_pmd_with_normal_path(self): """ @@ -445,8 +445,8 @@ class TestVM2VMVirtioPMD(TestCase): self.get_core_list(2) self.start_vhost_testpmd() self.start_vms(setting_args=setting_args) - self.start_vm_testpmd(self.vm_dut[0], path_mode) - self.start_vm_testpmd(self.vm_dut[1], path_mode) + self.start_vm_testpmd(self.vm_sut[0], path_mode) + self.start_vm_testpmd(self.vm_sut[1], path_mode) self.send_and_verify(mode="virtio 1.0 normal path", path=path_mode) def test_vhost_vm2vm_virito_11_pmd_with_mergeable_path(self): @@ -461,19 +461,19 @@ class TestVM2VMVirtioPMD(TestCase): self.start_vhost_testpmd() self.start_vms(setting_args=setting_args) # git the vm enough huge to run pdump - self.vm_dut[0].set_huge_pages(2048) + self.vm_sut[0].set_huge_pages(2048) # start testpmd and pdump in VM0 - self.start_vm_testpmd(self.vm_dut[0], path_mode, extern_param) - self.vm_dut[0].send_expect("set fwd rxonly", "testpmd> ", 30) - self.vm_dut[0].send_expect("start", "testpmd> ", 30) - self.launch_pdump_to_capture_pkt(self.vm_dut[0], dump_port) + 
self.start_vm_testpmd(self.vm_sut[0], path_mode, extern_param) + self.vm_sut[0].send_expect("set fwd rxonly", "testpmd> ", 30) + self.vm_sut[0].send_expect("start", "testpmd> ", 30) + self.launch_pdump_to_capture_pkt(self.vm_sut[0], dump_port) # start testpmd in VM1 and start to send packet - self.start_vm_testpmd(self.vm_dut[1], path_mode, extern_param) - self.vm_dut[1].send_expect("set txpkts 2000,2000,2000,2000", "testpmd> ", 30) - self.vm_dut[1].send_expect("set burst 1", "testpmd> ", 30) - self.vm_dut[1].send_expect("start tx_first 10", "testpmd> ", 30) + self.start_vm_testpmd(self.vm_sut[1], path_mode, extern_param) + self.vm_sut[1].send_expect("set txpkts 2000,2000,2000,2000", "testpmd> ", 30) + self.vm_sut[1].send_expect("set burst 1", "testpmd> ", 30) + self.vm_sut[1].send_expect("start tx_first 10", "testpmd> ", 30) # check the packet in vm0 - self.check_packet_payload_valid(self.vm_dut[0]) + self.check_packet_payload_valid(self.vm_sut[0]) def test_vhost_vm2vm_virtio_split_ring_with_mergeable_path_cbdma_enable(self): """ @@ -500,19 +500,19 @@ class TestVM2VMVirtioPMD(TestCase): rxq_txq=8, ) self.logger.info("Launch testpmd in VM1") - self.start_vm_testpmd(self.vm_dut[0], path_mode, extern_param) - self.vm_dut[0].send_expect("set fwd mac", "testpmd> ", 30) - self.vm_dut[0].send_expect("start", "testpmd> ", 30) + self.start_vm_testpmd(self.vm_sut[0], path_mode, extern_param) + self.vm_sut[0].send_expect("set fwd mac", "testpmd> ", 30) + self.vm_sut[0].send_expect("start", "testpmd> ", 30) self.logger.info("Launch testpmd in VM2, sent imix pkts from VM2") - self.start_vm_testpmd(self.vm_dut[1], path_mode, extern_param) - self.vm_dut[1].send_expect("set fwd mac", "testpmd> ", 30) - self.vm_dut[1].send_expect( + self.start_vm_testpmd(self.vm_sut[1], path_mode, extern_param) + self.vm_sut[1].send_expect("set fwd mac", "testpmd> ", 30) + self.vm_sut[1].send_expect( "set txpkts 64,256,512,1024,2000,64,256,512,1024,2000", "testpmd> ", 30 ) - 
self.vm_dut[1].send_expect("start tx_first 1", "testpmd> ", 30) + self.vm_sut[1].send_expect("start tx_first 1", "testpmd> ", 30) self.logger.info("Check imix packets") - self.check_port_stats_result(self.vm_dut[0], queue_num=8) - self.check_port_stats_result(self.vm_dut[1], queue_num=8) + self.check_port_stats_result(self.vm_sut[0], queue_num=8) + self.check_port_stats_result(self.vm_sut[1], queue_num=8) self.logger.info("Relaunch vhost side testpmd and Check imix packets 10 times") for _ in range(10): self.pmd_vhost.execute_cmd("quit", "#") @@ -524,10 +524,10 @@ class TestVM2VMVirtioPMD(TestCase): nb_cores=4, rxq_txq=8, ) - self.vm_dut[1].send_expect("stop", "testpmd> ", 30) - self.vm_dut[1].send_expect("start tx_first 32", "testpmd> ", 30) - self.check_port_stats_result(self.vm_dut[0], queue_num=8) - self.check_port_stats_result(self.vm_dut[1], queue_num=8) + self.vm_sut[1].send_expect("stop", "testpmd> ", 30) + self.vm_sut[1].send_expect("start tx_first 32", "testpmd> ", 30) + self.check_port_stats_result(self.vm_sut[0], queue_num=8) + self.check_port_stats_result(self.vm_sut[1], queue_num=8) def test_vhost_vm2vm_split_ring_with_mergeable_path_and_server_mode_cbdma_enable( self, @@ -556,19 +556,19 @@ class TestVM2VMVirtioPMD(TestCase): rxq_txq=4, ) self.logger.info("Launch testpmd in VM1") - self.start_vm_testpmd(self.vm_dut[0], path_mode, extern_param) - self.vm_dut[0].send_expect("set fwd mac", "testpmd> ", 30) - self.vm_dut[0].send_expect("start", "testpmd> ", 30) + self.start_vm_testpmd(self.vm_sut[0], path_mode, extern_param) + self.vm_sut[0].send_expect("set fwd mac", "testpmd> ", 30) + self.vm_sut[0].send_expect("start", "testpmd> ", 30) self.logger.info("Launch testpmd in VM2 and send imix pkts") - self.start_vm_testpmd(self.vm_dut[1], path_mode, extern_param) - self.vm_dut[1].send_expect("set fwd mac", "testpmd> ", 30) - self.vm_dut[1].send_expect( + self.start_vm_testpmd(self.vm_sut[1], path_mode, extern_param) + self.vm_sut[1].send_expect("set fwd 
mac", "testpmd> ", 30) + self.vm_sut[1].send_expect( "set txpkts 64,256,512,1024,2000,64,256,512,1024,2000", "testpmd> ", 30 ) - self.vm_dut[1].send_expect("start tx_first 32", "testpmd> ", 30) + self.vm_sut[1].send_expect("start tx_first 32", "testpmd> ", 30) self.logger.info("Check imix packets") - self.check_port_stats_result(self.vm_dut[0], queue_num=4) - self.check_port_stats_result(self.vm_dut[1], queue_num=4) + self.check_port_stats_result(self.vm_sut[0], queue_num=4) + self.check_port_stats_result(self.vm_sut[1], queue_num=4) self.logger.info("Relaunch vhost side testpmd and Check imix packets 10 times") for _ in range(10): self.pmd_vhost.execute_cmd("quit", "#") @@ -580,10 +580,10 @@ class TestVM2VMVirtioPMD(TestCase): nb_cores=4, rxq_txq=8, ) - self.vm_dut[1].send_expect("stop", "testpmd> ", 30) - self.vm_dut[1].send_expect("start tx_first 32", "testpmd> ", 30) - self.check_port_stats_result(self.vm_dut[0], queue_num=8) - self.check_port_stats_result(self.vm_dut[1], queue_num=8) + self.vm_sut[1].send_expect("stop", "testpmd> ", 30) + self.vm_sut[1].send_expect("start tx_first 32", "testpmd> ", 30) + self.check_port_stats_result(self.vm_sut[0], queue_num=8) + self.check_port_stats_result(self.vm_sut[1], queue_num=8) def test_vhost_vm2vm_packed_ring_with_mergeable_path_and_8queues_cbdma_enable(self): """ @@ -609,40 +609,40 @@ class TestVM2VMVirtioPMD(TestCase): rxq_txq=8, ) self.logger.info("Launch testpmd in VM1") - self.start_vm_testpmd(self.vm_dut[0], path_mode, extern_param) + self.start_vm_testpmd(self.vm_sut[0], path_mode, extern_param) self.logger.info("Launch testpmd in VM2 and send imix pkts") - self.start_vm_testpmd(self.vm_dut[1], path_mode, extern_param) - self.vm_dut[0].send_expect("set fwd mac", "testpmd> ", 30) - self.vm_dut[0].send_expect("start", "testpmd> ", 30) - self.vm_dut[1].send_expect("set fwd mac", "testpmd> ", 30) - self.vm_dut[1].send_expect( + self.start_vm_testpmd(self.vm_sut[1], path_mode, extern_param) + 
self.vm_sut[0].send_expect("set fwd mac", "testpmd> ", 30) + self.vm_sut[0].send_expect("start", "testpmd> ", 30) + self.vm_sut[1].send_expect("set fwd mac", "testpmd> ", 30) + self.vm_sut[1].send_expect( "set txpkts 64,256,512,1024,20000,64,256,512,1024,20000", "testpmd> ", 30 ) - self.vm_dut[1].send_expect("start tx_first 32", "testpmd> ", 30) + self.vm_sut[1].send_expect("start tx_first 32", "testpmd> ", 30) self.logger.info("Check imix packets") - self.check_port_stats_result(self.vm_dut[0]) - self.check_port_stats_result(self.vm_dut[1]) + self.check_port_stats_result(self.vm_sut[0]) + self.check_port_stats_result(self.vm_sut[1]) self.logger.info("Quit VM2 and relaunch VM2 with split ring") - self.vm_dut[1].send_expect("quit", "#", 20) + self.vm_sut[1].send_expect("quit", "#", 20) self.vm[1].stop() time.sleep(5) try: - self.vm_dut[1].send_expect("poweroff", "", 20) + self.vm_sut[1].send_expect("poweroff", "", 20) except Exception as e: self.logger.info(e) time.sleep(10) self.start_one_vms( mode=1, server_mode=False, opt_queue=8, vm_config=self.vm_config ) - self.start_vm_testpmd(self.vm_dut[1], path_mode, extern_param) - self.vm_dut[0].send_expect("start", "testpmd> ", 30) - self.vm_dut[1].send_expect("set fwd mac", "testpmd> ", 30) - self.vm_dut[1].send_expect( + self.start_vm_testpmd(self.vm_sut[1], path_mode, extern_param) + self.vm_sut[0].send_expect("start", "testpmd> ", 30) + self.vm_sut[1].send_expect("set fwd mac", "testpmd> ", 30) + self.vm_sut[1].send_expect( "set txpkts 64,256,512,1024,20000,64,256,512,1024,20000", "testpmd> ", 30 ) - self.vm_dut[1].send_expect("start tx_first 32", "testpmd> ", 30) - self.check_port_stats_result(self.vm_dut[0], queue_num=8) - self.check_port_stats_result(self.vm_dut[1], queue_num=8) + self.vm_sut[1].send_expect("start tx_first 32", "testpmd> ", 30) + self.check_port_stats_result(self.vm_sut[0], queue_num=8) + self.check_port_stats_result(self.vm_sut[1], queue_num=8) def start_one_vms( self, @@ -678,8 +678,8 @@ class 
TestVM2VMVirtioPMD(TestCase): ",csum=on,gso=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on" ) - vm_dut = None - vm_info = VM(self.dut, "vm%d" % vm_index, vm_config) + vm_sut = None + vm_info = VM(self.sut_node, "vm%d" % vm_index, vm_config) vm_params["driver"] = "vhost-user" if not server_mode: vm_params["opt_path"] = self.base_dir + "/vhost-net%d" % vm_index @@ -692,29 +692,29 @@ class TestVM2VMVirtioPMD(TestCase): vm_info.set_vm_device(**vm_params) time.sleep(3) try: - vm_dut = vm_info.start() - if vm_dut is None: + vm_sut = vm_info.start() + if vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print((utils.RED("Failure for %s" % str(e)))) raise e - self.vm_dut[-1] = vm_dut + self.vm_sut[-1] = vm_sut self.vm[-1] = vm_info - def check_port_stats_result(self, vm_dut, queue_num=0): - out = vm_dut.send_expect("show port stats all", "testpmd> ", 30) + def check_port_stats_result(self, vm_sut, queue_num=0): + out = vm_sut.send_expect("show port stats all", "testpmd> ", 30) rx_packets = re.findall(r"RX-packets: (\w+)", out) tx_packets = re.findall(r"TX-packets: (\w+)", out) self.verify(int(rx_packets[0]) > 1, "RX packets no correctly") self.verify(int(tx_packets[0]) > 1, "TX packets no correctly") - self.check_packets_of_each_queue(vm_dut, queue_num) - # vm_dut.send_expect('stop', 'testpmd> ', 30) + self.check_packets_of_each_queue(vm_sut, queue_num) + # vm_sut.send_expect('stop', 'testpmd> ', 30) - def check_packets_of_each_queue(self, vm_dut, queue_num): + def check_packets_of_each_queue(self, vm_sut, queue_num): """ check each queue has receive packets """ - out = vm_dut.send_expect("stop", "testpmd> ", 60) + out = vm_sut.send_expect("stop", "testpmd> ", 60) for queue_index in range(queue_num): queue = "Queue= %d" % queue_index index = out.find(queue) @@ -727,8 +727,8 @@ class TestVM2VMVirtioPMD(TestCase): "The queue %d rx-packets or tx-packets is 0 about " % queue_index + "rx-packets:%d, tx-packets:%d" % (rx_packets, 
tx_packets), ) - vm_dut.send_expect("clear port stats all", "testpmd> ", 30) - vm_dut.send_expect("start", "testpmd> ", 30) + vm_sut.send_expect("clear port stats all", "testpmd> ", 30) + vm_sut.send_expect("start", "testpmd> ", 30) def prepare_test_env( self, @@ -814,7 +814,7 @@ class TestVM2VMVirtioPMD(TestCase): enable_queues, cbdma_arg_1, ) - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores=self.cores_list, prefix="vhost", no_pci=no_pci ) if rxq_txq is None: @@ -840,7 +840,7 @@ class TestVM2VMVirtioPMD(TestCase): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -888,7 +888,7 @@ class TestVM2VMVirtioPMD(TestCase): dmas_info += dmas self.dmas_info = dmas_info[:-1] self.device_str = " ".join(used_cbdma) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -897,11 +897,11 @@ class TestVM2VMVirtioPMD(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -913,11 +913,11 @@ class TestVM2VMVirtioPMD(TestCase): # Run after each test case. # self.stop_all_apps() - self.dut.kill_all() + self.sut_node.kill_all() self.bind_cbdma_device_to_kernel() def tear_down_all(self): """ Run after each test suite. 
""" - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) diff --git a/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py b/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py index b926534e..73186130 100644 --- a/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py +++ b/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py @@ -22,17 +22,17 @@ from framework.virt_common import VM class TestVM2VMVirtioPmdCbdma(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores_list = self.dut.get_core_list(config="all", socket=self.ports_socket) + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores_list = self.sut_node.get_core_list(config="all", socket=self.ports_socket) self.vhost_core_list = self.cores_list[0:5] - self.memory_channel = self.dut.get_memory_channels() - self.base_dir = self.dut.base_dir.replace("~", "/root") - self.pci_info = self.dut.ports_info[0]["pci"] - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.memory_channel = self.sut_node.get_memory_channels() + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + self.pci_info = self.sut_node.ports_info[0]["pci"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] self.testpmd_name = self.app_testpmd_path.split("/")[-1] - self.vhost_user = self.dut.new_session(suite="vhost") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) + self.vhost_user = self.sut_node.new_session(suite="vhost") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) def set_up(self): """ @@ -46,11 +46,11 @@ class TestVM2VMVirtioPmdCbdma(TestCase): "Path", ] self.result_table_create(self.table_header) - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + 
self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("killall -s INT qemu-system-x86_64", "#") + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") self.vm_num = 2 - self.vm_dut = [] + self.vm_sut = [] self.vm = [] def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False): @@ -60,7 +60,7 @@ class TestVM2VMVirtioPmdCbdma(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -84,7 +84,7 @@ class TestVM2VMVirtioPmdCbdma(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -145,7 +145,7 @@ class TestVM2VMVirtioPmdCbdma(TestCase): for i in range(self.vm_num): if restart_vm1: i = i + 1 - vm_info = VM(self.dut, "vm%d" % i, vm_config) + vm_info = VM(self.sut_node, "vm%d" % i, vm_config) vm_params["driver"] = "vhost-user" if not server_mode: vm_params["opt_path"] = self.base_dir + "/vhost-net%d" % i @@ -163,13 +163,13 @@ class TestVM2VMVirtioPmdCbdma(TestCase): vm_info.set_vm_device(**vm_params) time.sleep(3) try: - vm_dut = vm_info.start() - if vm_dut is None: + vm_sut = vm_info.start() + if vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print((utils.RED("Failure for %s" % str(e)))) raise e - self.vm_dut.append(vm_dut) + self.vm_sut.append(vm_sut) self.vm.append(vm_info) def start_vm0_testpmd(self): @@ -242,8 +242,8 @@ class TestVM2VMVirtioPmdCbdma(TestCase): param=param, ) self.start_vms(vm_queue=8, packed=False, server_mode=True) - self.vm0_pmd = PmdOutput(self.vm_dut[0]) - self.vm1_pmd = PmdOutput(self.vm_dut[1]) + self.vm0_pmd = PmdOutput(self.vm_sut[0]) + self.vm1_pmd 
= PmdOutput(self.vm_sut[1]) self.start_vm0_testpmd() self.start_vm1_testpmd(resend=False) self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=8) @@ -293,8 +293,8 @@ class TestVM2VMVirtioPmdCbdma(TestCase): param=param, ) self.start_vms(vm_queue=8, packed=False, server_mode=True) - self.vm0_pmd = PmdOutput(self.vm_dut[0]) - self.vm1_pmd = PmdOutput(self.vm_dut[1]) + self.vm0_pmd = PmdOutput(self.vm_sut[0]) + self.vm1_pmd = PmdOutput(self.vm_sut[1]) self.start_vm0_testpmd() self.start_vm1_testpmd(resend=False) self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=4) @@ -349,8 +349,8 @@ class TestVM2VMVirtioPmdCbdma(TestCase): param=param, ) self.start_vms(vm_queue=8, packed=True, server_mode=False) - self.vm0_pmd = PmdOutput(self.vm_dut[0]) - self.vm1_pmd = PmdOutput(self.vm_dut[1]) + self.vm0_pmd = PmdOutput(self.vm_sut[0]) + self.vm1_pmd = PmdOutput(self.vm_sut[1]) self.start_vm0_testpmd() self.start_vm1_testpmd(resend=False) self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=8) @@ -358,10 +358,10 @@ class TestVM2VMVirtioPmdCbdma(TestCase): self.logger.info("Quit and relaunch VM2 with split ring") self.vm1_pmd.execute_cmd("quit", "#") self.vm[1].stop() - self.vm_dut.remove(self.vm_dut[1]) + self.vm_sut.remove(self.vm_sut[1]) self.vm.remove(self.vm[1]) self.start_vms(vm_queue=8, packed=False, restart_vm1=True, server_mode=False) - self.vm1_pmd = PmdOutput(self.vm_dut[1]) + self.vm1_pmd = PmdOutput(self.vm_sut[1]) self.vm0_pmd.execute_cmd("start") self.start_vm1_testpmd(resend=False) self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=8) @@ -369,16 +369,16 @@ class TestVM2VMVirtioPmdCbdma(TestCase): def stop_all_apps(self): for i in range(len(self.vm)): - self.vm_dut[i].send_expect("quit", "#", 20) + self.vm_sut[i].send_expect("quit", "#", 20) self.vm[i].stop() self.vhost_user.send_expect("quit", "#", 30) def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + 
self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -389,11 +389,11 @@ class TestVM2VMVirtioPmdCbdma(TestCase): Run after each test case. """ self.stop_all_apps() - self.dut.kill_all() + self.sut_node.kill_all() self.bind_cbdma_device_to_kernel() def tear_down_all(self): """ Run after each test suite. """ - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) diff --git a/tests/TestSuite_vm2vm_virtio_user.py b/tests/TestSuite_vm2vm_virtio_user.py index 5ba1e38f..d9d408bc 100644 --- a/tests/TestSuite_vm2vm_virtio_user.py +++ b/tests/TestSuite_vm2vm_virtio_user.py @@ -15,36 +15,36 @@ import re import time import framework.utils as utils -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase class TestVM2VMVirtioUser(TestCase): def set_up_all(self): - self.memory_channel = self.dut.get_memory_channels() + self.memory_channel = self.sut_node.get_memory_channels() self.dump_virtio_pcap = "/tmp/pdump-virtio-rx.pcap" self.dump_vhost_pcap = "/tmp/pdump-vhost-rx.pcap" self.vhost_prefix = "vhost" self.virtio_prefix_0 = "virtio0" self.virtio_prefix_1 = "virtio1" - socket_num = len(set([int(core["socket"]) for core in self.dut.cores])) + socket_num = len(set([int(core["socket"]) for core in self.sut_node.cores])) self.socket_mem = ",".join(["1024"] * socket_num) self.get_core_list() self.rebuild_flag = False - self.app_pdump = self.dut.apps_name["pdump"] - self.dut_ports = self.dut.get_ports() + self.app_pdump = self.sut_node.apps_name["pdump"] + self.sut_ports = self.sut_node.get_ports() self.cbdma_dev_infos = [] - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + 
self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.device_str = "" - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user0 = self.dut.new_session(suite="virtio-user0") - self.virtio_user1 = self.dut.new_session(suite="virtio-user1") - self.pdump_user = self.dut.new_session(suite="pdump-user") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) - self.virtio_user1_pmd = PmdOutput(self.dut, self.virtio_user1) - self.dut.restore_interfaces() + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user0 = self.sut_node.new_session(suite="virtio-user0") + self.virtio_user1 = self.sut_node.new_session(suite="virtio-user1") + self.pdump_user = self.sut_node.new_session(suite="pdump-user") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user0_pmd = PmdOutput(self.sut_node, self.virtio_user0) + self.virtio_user1_pmd = PmdOutput(self.sut_node, self.virtio_user1) + self.sut_node.restore_interfaces() self.dump_port = "device_id=net_virtio_user1" def set_up(self): @@ -53,16 +53,16 @@ class TestVM2VMVirtioUser(TestCase): """ self.nopci = True self.queue_num = 1 - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("rm -rf %s" % self.dump_virtio_pcap, "#") - self.dut.send_expect("rm -rf %s" % self.dump_vhost_pcap, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("rm -rf %s" % self.dump_virtio_pcap, "#") + self.sut_node.send_expect("rm -rf %s" % self.dump_vhost_pcap, "#") def get_core_list(self): """ create core mask """ self.core_config = "1S/6C/1T" - self.cores_list = self.dut.get_core_list(self.core_config) + self.cores_list = self.sut_node.get_core_list(self.core_config) self.verify( len(self.cores_list) >= 6, "There no enough cores to run this suite" ) @@ -117,7 +117,7 @@ class TestVM2VMVirtioUser(TestCase): @property def check_2M_env(self): - 
out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -233,9 +233,9 @@ class TestVM2VMVirtioUser(TestCase): time.sleep(20) self.pdump_user.send_expect("^c", "# ", 60) time.sleep(2) - self.dut.session.copy_file_from(src="%s" % filename, dst="%s" % filename) - pkt = Packet() - pkts = pkt.read_pcapfile(filename) + self.sut_node.session.copy_file_from(src="%s" % filename, dst="%s" % filename) + scapy_pkt_builder = ScapyPacketBuilder() + pkts = scapy_pkt_builder.read_pcapfile(filename) self.verify( pkts is not None and len(pkts) == total_pkts_num, "The virtio/vhost do not capture all the packets" @@ -358,7 +358,7 @@ class TestVM2VMVirtioUser(TestCase): if dump the vhost-testpmd, the vhost-testpmd should started before launch pdump if dump the virtio-testpmd, the virtio-testpmd should started before launch pdump """ - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores="Default", prefix=file_prefix, fixed_prefix=True ) command_line = ( @@ -435,9 +435,9 @@ class TestVM2VMVirtioUser(TestCase): # quit testpmd self.quit_all_testpmd() time.sleep(2) - self.dut.session.copy_file_from(src="%s" % filename, dst="%s" % filename) - pkt = Packet() - pkts = pkt.read_pcapfile(filename) + self.sut_node.session.copy_file_from(src="%s" % filename, dst="%s" % filename) + scapy_pkt_builder = ScapyPacketBuilder() + pkts = scapy_pkt_builder.read_pcapfile(filename) self.verify( pkts is not None and len(pkts) == total_pkts_num, "The virtio/vhost do not capture all the packets" @@ -463,38 +463,38 @@ class TestVM2VMVirtioUser(TestCase): vhost received pkts in self.dump_vhost_pcap, virtio received pkts self.dump_virtio_pcap check headers and payload of all pkts are same. 
""" - pk_rx_virtio = Packet() - pk_rx_vhost = Packet() - pk_rx_virtio.read_pcapfile(self.dump_virtio_pcap) - pk_rx_vhost.read_pcapfile(self.dump_vhost_pcap) + scapy_pkt_builder_rx_virtio = ScapyPacketBuilder() + scapy_pkt_builder_rx_vhost = ScapyPacketBuilder() + scapy_pkt_builder_rx_virtio.read_pcapfile(self.dump_virtio_pcap) + scapy_pkt_builder_rx_vhost.read_pcapfile(self.dump_vhost_pcap) # check the headers and payload is same of vhost and virtio - for i in range(len(pk_rx_virtio)): + for i in range(len(scapy_pkt_builder_rx_virtio)): self.verify( - pk_rx_virtio[i].haslayer("Raw"), + scapy_pkt_builder_rx_virtio[i].haslayer("Raw"), "The pkt index %d, virtio pkt has no layer Raw" % i, ) self.verify( - pk_rx_vhost[i].haslayer("Raw"), + scapy_pkt_builder_rx_vhost[i].haslayer("Raw"), "The pkt index %d, vhost pkt has no layer Raw" % i, ) self.verify( - pk_rx_virtio[i].haslayer("UDP"), + scapy_pkt_builder_rx_virtio[i].haslayer("UDP"), "The pkt index %d, virtio pkt has no layer UDP" % i, ) self.verify( - pk_rx_vhost[i].haslayer("UDP"), + scapy_pkt_builder_rx_vhost[i].haslayer("UDP"), "The pkt index %d, vhost pkt has no layer UDP" % i, ) - rx_virtio_load = pk_rx_virtio[i]["Raw"].load - rx_vhost_load = pk_rx_vhost[i]["Raw"].load - rx_virtio_head = pk_rx_virtio[i]["UDP"].remove_payload() - rx_vhost_head = pk_rx_vhost[i]["UDP"].remove_payload() + rx_virtio_load = scapy_pkt_builder_rx_virtio[i]["Raw"].load + rx_vhost_load = scapy_pkt_builder_rx_vhost[i]["Raw"].load + rx_virtio_head = scapy_pkt_builder_rx_virtio[i]["UDP"].remove_payload() + rx_vhost_head = scapy_pkt_builder_rx_vhost[i]["UDP"].remove_payload() # check header is same self.verify( - pk_rx_virtio[i] == pk_rx_vhost[i], + scapy_pkt_builder_rx_virtio[i] == scapy_pkt_builder_rx_vhost[i], "the head is different on index: %d" % i + "virtio head: %s, vhost head: %s" - % (pk_rx_virtio[i].show, pk_rx_vhost[i].show()), + % (scapy_pkt_builder_rx_virtio[i].show, scapy_pkt_builder_rx_vhost[i].show()), ) # check payload is same 
self.verify( @@ -858,7 +858,7 @@ class TestVM2VMVirtioUser(TestCase): """ get all cbdma ports """ - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -880,7 +880,7 @@ class TestVM2VMVirtioUser(TestCase): "There no enough cbdma device to run this suite", ) self.device_str = " ".join(self.cbdma_dev_infos[0 : self.cbdma_nic_dev_num]) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -889,11 +889,11 @@ class TestVM2VMVirtioUser(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -1658,13 +1658,13 @@ class TestVM2VMVirtioUser(TestCase): def close_all_session(self): if getattr(self, "vhost_user", None): - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) if getattr(self, "virtio-user0", None): - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.virtio_user0) if getattr(self, "virtio-user1", None): - self.dut.close_session(self.virtio_user1) + self.sut_node.close_session(self.virtio_user1) if getattr(self, "pdump_session", None): - self.dut.close_session(self.pdump_user) + self.sut_node.close_session(self.pdump_user) def tear_down(self): """ @@ -1672,12 +1672,12 @@ class TestVM2VMVirtioUser(TestCase): """ self.quit_all_testpmd() self.bind_cbdma_device_to_kernel() - self.dut.kill_all() + self.sut_node.kill_all() time.sleep(2) def tear_down_all(self): """ Run after each test suite. 
""" - self.bind_nic_driver(self.dut_ports, self.drivername) + self.bind_nic_driver(self.sut_ports, self.drivername) self.close_all_session() diff --git a/tests/TestSuite_vm2vm_virtio_user_cbdma.py b/tests/TestSuite_vm2vm_virtio_user_cbdma.py index 35d69325..bb59ebef 100644 --- a/tests/TestSuite_vm2vm_virtio_user_cbdma.py +++ b/tests/TestSuite_vm2vm_virtio_user_cbdma.py @@ -13,31 +13,31 @@ mergeable, non-mergeable path test """ import re -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.test_case import TestCase class TestVM2VMVirtioUserCbdma(TestCase): def set_up_all(self): - self.memory_channel = self.dut.get_memory_channels() + self.memory_channel = self.sut_node.get_memory_channels() self.dump_virtio_pcap = "/tmp/pdump-virtio-rx.pcap" self.dump_vhost_pcap = "/tmp/pdump-vhost-rx.pcap" - self.app_pdump = self.dut.apps_name["pdump"] - self.dut_ports = self.dut.get_ports() - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores_list = self.dut.get_core_list(config="all", socket=self.ports_socket) + self.app_pdump = self.sut_node.apps_name["pdump"] + self.sut_ports = self.sut_node.get_ports() + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores_list = self.sut_node.get_core_list(config="all", socket=self.ports_socket) self.vhost_core_list = self.cores_list[0:9] self.virtio0_core_list = self.cores_list[10:12] self.virtio1_core_list = self.cores_list[12:14] - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user0 = self.dut.new_session(suite="virtio-user0") - self.virtio_user1 = self.dut.new_session(suite="virtio-user1") - self.pdump_user = self.dut.new_session(suite="pdump-user") - self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user) - self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) - self.virtio_user1_pmd = PmdOutput(self.dut, self.virtio_user1) - self.testpmd_name = 
self.dut.apps_name["test-pmd"].split("/")[-1] + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user0 = self.sut_node.new_session(suite="virtio-user0") + self.virtio_user1 = self.sut_node.new_session(suite="virtio-user1") + self.pdump_user = self.sut_node.new_session(suite="pdump-user") + self.vhost_user_pmd = PmdOutput(self.sut_node, self.vhost_user) + self.virtio_user0_pmd = PmdOutput(self.sut_node, self.virtio_user0) + self.virtio_user1_pmd = PmdOutput(self.sut_node, self.virtio_user1) + self.testpmd_name = self.sut_node.apps_name["test-pmd"].split("/")[-1] def set_up(self): """ @@ -45,10 +45,10 @@ class TestVM2VMVirtioUserCbdma(TestCase): """ self.nopci = True self.queue_num = 1 - self.dut.send_expect("rm -rf ./vhost-net*", "#") - self.dut.send_expect("rm -rf %s" % self.dump_virtio_pcap, "#") - self.dut.send_expect("rm -rf %s" % self.dump_vhost_pcap, "#") - self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") + self.sut_node.send_expect("rm -rf ./vhost-net*", "#") + self.sut_node.send_expect("rm -rf %s" % self.dump_virtio_pcap, "#") + self.sut_node.send_expect("rm -rf %s" % self.dump_vhost_pcap, "#") + self.sut_node.send_expect("killall -s INT %s" % self.testpmd_name, "#") def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False): """ @@ -57,7 +57,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): self.all_cbdma_list = [] self.cbdma_list = [] self.cbdma_str = "" - out = self.dut.send_expect( + out = self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -81,7 +81,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): ) self.cbdma_list = self.all_cbdma_list[0:cbdma_num] self.cbdma_str = " ".join(self.cbdma_list) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.cbdma_str), "# ", @@ -116,11 +116,11 @@ class TestVM2VMVirtioUserCbdma(TestCase): return lcore_dma_param 
def bind_cbdma_device_to_kernel(self): - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str, "# ", 60, @@ -128,7 +128,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -268,7 +268,7 @@ class TestVM2VMVirtioUserCbdma(TestCase): if dump the vhost-testpmd, the vhost-testpmd should started before launch pdump if dump the virtio-testpmd, the virtio-testpmd should started before launch pdump """ - eal_params = self.dut.create_eal_parameters( + eal_params = self.sut_node.create_eal_parameters( cores="Default", prefix="virtio-user1", fixed_prefix=True ) command_line = ( @@ -313,11 +313,11 @@ class TestVM2VMVirtioUserCbdma(TestCase): check the payload is valid """ self.pdump_user.send_expect("^c", "# ", 60) - self.dut.session.copy_file_from( + self.sut_node.session.copy_file_from( src=self.dump_virtio_pcap, dst=self.dump_virtio_pcap ) - pkt = Packet() - pkts = pkt.read_pcapfile(self.dump_virtio_pcap) + scapy_pkt_builder = ScapyPacketBuilder() + pkts = scapy_pkt_builder.read_pcapfile(self.dump_virtio_pcap) for key, value in check_dict.items(): count = 0 for i in range(len(pkts)): @@ -1294,20 +1294,20 @@ class TestVM2VMVirtioUserCbdma(TestCase): def close_all_session(self): if getattr(self, "vhost_user", None): - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) if getattr(self, "virtio-user0", None): - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.virtio_user0) if getattr(self, "virtio-user1", None): - 
self.dut.close_session(self.virtio_user1) + self.sut_node.close_session(self.virtio_user1) if getattr(self, "pdump_session", None): - self.dut.close_session(self.pdump_user) + self.sut_node.close_session(self.pdump_user) def tear_down(self): """ Run after each test case. """ self.quit_all_testpmd() - self.dut.kill_all() + self.sut_node.kill_all() self.bind_cbdma_device_to_kernel() def tear_down_all(self): diff --git a/tests/TestSuite_vm_hotplug.py b/tests/TestSuite_vm_hotplug.py index e0179b3d..7abc702f 100644 --- a/tests/TestSuite_vm_hotplug.py +++ b/tests/TestSuite_vm_hotplug.py @@ -22,19 +22,19 @@ VM_CORES_MASK = "all" class TestVmHotplug(TestCase): def set_up_all(self): - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) > 1, "Insufficient ports") - self.dut.restore_interfaces() - tester_port = self.tester.get_local_port(self.dut_ports[0]) - self.tester_intf = self.tester.get_interface(tester_port) - - self.ports = self.dut.get_ports() - self.dut.send_expect("modprobe vfio-pci", "#") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) > 1, "Insufficient ports") + self.sut_node.restore_interfaces() + tg_port = self.tg_node.get_local_port(self.sut_ports[0]) + self.tg_intf = self.tg_node.get_interface(tg_port) + + self.ports = self.sut_node.get_ports() + self.sut_node.send_expect("modprobe vfio-pci", "#") self.setup_pf_1vm_env_flag = 0 - tester_port0 = self.tester.get_local_port(self.dut_ports[0]) - tester_port1 = self.tester.get_local_port(self.dut_ports[1]) - self.tester_intf0 = self.tester.get_interface(tester_port0) - self.tester_intf1 = self.tester.get_interface(tester_port1) + tg_port0 = self.tg_node.get_local_port(self.sut_ports[0]) + tg_port1 = self.tg_node.get_local_port(self.sut_ports[1]) + self.tg_intf0 = self.tg_node.get_interface(tg_port0) + self.tg_intf1 = self.tg_node.get_interface(tg_port1) self.device = 0 self.test_pmd_flag = 1 # due to current dts framework is not support monitor 
stdio, @@ -55,48 +55,48 @@ class TestVmHotplug(TestCase): " def start_vm(self, device=1): - self.host_session = self.dut.new_session(suite="host_session") - self.dut.bind_interfaces_linux("vfio-pci", [self.ports[0]]) + self.host_session = self.sut_node.new_session(suite="host_session") + self.sut_node.bind_interfaces_linux("vfio-pci", [self.ports[0]]) if device == 2: - self.dut.bind_interfaces_linux("vfio-pci", [self.ports[1]]) + self.sut_node.bind_interfaces_linux("vfio-pci", [self.ports[1]]) self.qemu_cmd += "-device vfio-pci,host=%s,id=dev2" cmd = self.qemu_cmd % ( - self.dut.get_ip_address(), - self.dut.ports_info[0]["pci"], - self.dut.ports_info[1]["pci"], + self.sut_node.get_ip_address(), + self.sut_node.ports_info[0]["pci"], + self.sut_node.ports_info[1]["pci"], ) else: cmd = self.qemu_cmd % ( - self.dut.get_ip_address(), - self.dut.ports_info[0]["pci"], + self.sut_node.get_ip_address(), + self.sut_node.ports_info[0]["pci"], ) self.host_session.send_expect(cmd, "QEMU ") time.sleep(10) - self.vm0_dut = self.connect_vm() - self.verify(self.vm0_dut is not None, "vm start fail") + self.vm0_sut = self.connect_vm() + self.verify(self.vm0_sut is not None, "vm start fail") self.setup_pf_1vm_env_flag = 1 - self.vm_session = self.vm0_dut.new_session(suite="vm_session") - self.vf_pci0 = self.vm0_dut.ports_info[0]["pci"] + self.vm_session = self.vm0_sut.new_session(suite="vm_session") + self.vf_pci0 = self.vm0_sut.ports_info[0]["pci"] if device == 2: - self.vf_pci1 = self.vm0_dut.ports_info[1]["pci"] - self.vm0_dut.get_ports("any") - self.vm_testpmd = PmdOutput(self.vm0_dut) + self.vf_pci1 = self.vm0_sut.ports_info[1]["pci"] + self.vm0_sut.get_ports("any") + self.vm_testpmd = PmdOutput(self.vm0_sut) def connect_vm(self): - self.vm0 = QEMUKvm(self.dut, "vm0", "vm_hotplug") + self.vm0 = QEMUKvm(self.sut_node, "vm0", "vm_hotplug") self.vm0.net_type = "hostfwd" - self.vm0.hostfwd_addr = "%s:6000" % self.dut.get_ip_address() + self.vm0.hostfwd_addr = "%s:6000" % 
self.sut_node.get_ip_address() self.vm0.def_driver = "vfio-pci" self.vm0.driver_mode = "noiommu" self.wait_vm_net_ready() - vm_dut = self.vm0.instantiate_vm_dut(autodetect_topo=False) - if vm_dut: - return vm_dut + vm_sut = self.vm0.instantiate_vm_sut(autodetect_topo=False) + if vm_sut: + return vm_sut else: return None def wait_vm_net_ready(self): - self.vm_net_session = self.dut.new_session(suite="vm_net_session") + self.vm_net_session = self.sut_node.new_session(suite="vm_net_session") self.start_time = time.time() cur_time = time.time() time_diff = cur_time - self.start_time @@ -120,7 +120,7 @@ class TestVmHotplug(TestCase): time.sleep(1) cur_time = time.time() time_diff = cur_time - self.start_time - self.dut.close_session(self.vm_net_session) + self.sut_node.close_session(self.vm_net_session) def set_up(self): # according to nic number starts vm @@ -128,7 +128,7 @@ class TestVmHotplug(TestCase): if "two" in self.running_case: self.device = 2 self.destroy_pf_1vm_env() - self.dut.restore_interfaces() + self.sut_node.restore_interfaces() self.start_vm(self.device) elif self.device == 0: if "two" in self.running_case: @@ -141,7 +141,7 @@ class TestVmHotplug(TestCase): pass else: self.destroy_pf_1vm_env() - self.dut.restore_interfaces() + self.sut_node.restore_interfaces() self.start_vm(self.device) def test_one_device_hotplug(self): @@ -154,7 +154,7 @@ class TestVmHotplug(TestCase): self.check_vf_device(has_device=False) self.add_pf_device_qemu(device=1) out = self.vm_testpmd.execute_cmd( - "port attach %s" % self.vm0_dut.ports_info[0]["pci"] + "port attach %s" % self.vm0_sut.ports_info[0]["pci"] ) self.verify("Port 0 is attached" in out, "attach device fail") self.verify_rxtx_only() @@ -186,11 +186,11 @@ class TestVmHotplug(TestCase): self.check_vf_device(has_device=False, device=2) self.add_pf_device_qemu(device=2) out = self.vm_testpmd.execute_cmd( - "port attach %s" % self.vm0_dut.ports_info[0]["pci"] + "port attach %s" % self.vm0_sut.ports_info[0]["pci"] ) 
self.verify("Port 0 is attached" in out, "attach device fail") out = self.vm_testpmd.execute_cmd( - "port attach %s" % self.vm0_dut.ports_info[1]["pci"] + "port attach %s" % self.vm0_sut.ports_info[1]["pci"] ) self.verify("Port 1 is attached" in out, "attach device fail") self.verify_rxtx_only() @@ -215,17 +215,17 @@ class TestVmHotplug(TestCase): def start_tcpdump(self, iface_list): for iface in iface_list: - self.tester.send_expect("rm -rf tcpdump%s.out" % iface, "#") - self.tester.send_expect( + self.tg_node.send_expect("rm -rf tcpdump%s.out" % iface, "#") + self.tg_node.send_expect( "tcpdump -c 1500 -i %s -vv -n 2>tcpdump%s.out &" % (iface, iface), "#" ) time.sleep(1) def get_tcpdump_package(self, iface_list): - self.tester.send_expect("killall tcpdump", "#") + self.tg_node.send_expect("killall tcpdump", "#") result = [] for iface in iface_list: - out = self.tester.send_expect("cat tcpdump%s.out" % iface, "#", timeout=60) + out = self.tg_node.send_expect("cat tcpdump%s.out" % iface, "#", timeout=60) cap_num = re.findall("(\d+) packets", out) result.append(cap_num[0]) return result @@ -239,7 +239,7 @@ class TestVmHotplug(TestCase): self.vm_testpmd.wait_link_status_up("all") self.send_packet() - out = self.vm0_dut.get_session_output(timeout=20) + out = self.vm0_sut.get_session_output(timeout=20) self.verify(self.vf0_mac in out, "vf0 receive packet fail") if self.device == 2: self.verify(self.vf1_mac in out, "vf1 receive packet fail") @@ -247,9 +247,9 @@ class TestVmHotplug(TestCase): self.vm_testpmd.execute_cmd("stop") self.vm_testpmd.execute_cmd("set fwd txonly") iface_list = [] - iface_list.append(self.tester_intf0) + iface_list.append(self.tg_intf0) if self.device == 2: - iface_list.append(self.tester_intf1) + iface_list.append(self.tg_intf1) self.start_tcpdump(iface_list) self.vm_testpmd.execute_cmd("start") self.vm_testpmd.wait_link_status_up("all") @@ -274,12 +274,12 @@ class TestVmHotplug(TestCase): def add_pf_device_qemu(self, device=1): 
self.host_session.send_expect( - "device_add vfio-pci,host=%s,id=dev1" % self.dut.ports_info[0]["pci"], + "device_add vfio-pci,host=%s,id=dev1" % self.sut_node.ports_info[0]["pci"], "(qemu)", ) if device == 2: self.host_session.send_expect( - "device_add vfio-pci,host=%s,id=dev2" % self.dut.ports_info[1]["pci"], + "device_add vfio-pci,host=%s,id=dev2" % self.sut_node.ports_info[1]["pci"], "(qemu)", ) time.sleep(3) @@ -294,60 +294,60 @@ class TestVmHotplug(TestCase): time.sleep(1) def send_packet(self): - # check tester's link status before send packet - for iface in [self.tester_intf0, self.tester_intf1]: + # check TG's link status before send packet + for iface in [self.tg_intf0, self.tg_intf1]: self.verify( - self.tester.is_interface_up(intf=iface), + self.tg_node.is_interface_up(intf=iface), "Wrong link status, should be up", ) self.vf0_mac = self.vm_testpmd.get_port_mac(0) pkts = [] pkt1 = r'sendp([Ether(dst="%s")/IP()/UDP()/Raw(load="P"*26)], iface="%s")' % ( self.vf0_mac, - self.tester_intf, + self.tg_intf, ) pkts.append(pkt1) if self.device == 2: self.vf1_mac = self.vm_testpmd.get_port_mac(1) pkt2 = ( r'sendp([Ether(dst="%s")/IP()/UDP()/Raw(load="P"*26)], iface="%s")' - % (self.vf1_mac, self.tester_intf) + % (self.vf1_mac, self.tg_intf) ) pkts.append(pkt2) for pkt in pkts: - self.tester.scapy_append(pkt) - self.tester.scapy_execute() + self.tg_node.scapy_append(pkt) + self.tg_node.scapy_execute() time.sleep(2) def destroy_pf_1vm_env(self): if getattr(self, "vm0", None): - self.vm0_dut.close_session(self.vm_session) + self.vm0_sut.close_session(self.vm_session) try: self.vm0.stop() except Exception: pass - self.dut.send_expect("killall qemu-system-x86_64", "#") + self.sut_node.send_expect("killall qemu-system-x86_64", "#") time.sleep(1) - out = self.dut.send_expect("ps -ef |grep qemu", "#") - if self.dut.get_ip_address() in out: - self.dut.send_expect("killall qemu-system-x86_64", "#") + out = self.sut_node.send_expect("ps -ef |grep qemu", "#") + if 
self.sut_node.get_ip_address() in out: + self.sut_node.send_expect("killall qemu-system-x86_64", "#") self.vm0 = None self.setup_pf_1vm_env_flag = 0 - self.dut.close_session(self.host_session) + self.sut_node.close_session(self.host_session) self.host_session = None self.vm_session = None - self.dut.virt_exit() + self.sut_node.virt_exit() - if getattr(self, "used_dut_port", None): - self.dut.destroy_sriov_vfs_by_port(self.used_dut_port) - port = self.dut.ports_info[self.used_dut_port]["port"] + if getattr(self, "used_sut_port", None): + self.sut_node.destroy_sriov_vfs_by_port(self.used_sut_port) + port = self.sut_node.ports_info[self.used_sut_port]["port"] port.bind_driver() - self.used_dut_port = None + self.used_sut_port = None - for port_id in self.dut_ports: - port = self.dut.ports_info[port_id]["port"] + for port_id in self.sut_ports: + port = self.sut_node.ports_info[port_id]["port"] port.bind_driver() def tear_down(self): diff --git a/tests/TestSuite_vm_power_manager.py b/tests/TestSuite_vm_power_manager.py index c4bdca62..c1342672 100644 --- a/tests/TestSuite_vm_power_manager.py +++ b/tests/TestSuite_vm_power_manager.py @@ -20,14 +20,14 @@ class TestVmPowerManager(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Not enough ports for " + self.nic) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Not enough ports for " + self.nic) # create temporary folder for power monitor - self.dut.send_expect("mkdir -p /tmp/powermonitor", "# ") - self.dut.send_expect("chmod 777 /tmp/powermonitor", "# ") + self.sut_node.send_expect("mkdir -p /tmp/powermonitor", "# ") + self.sut_node.send_expect("chmod 777 /tmp/powermonitor", "# ") # compile vm power manager - out = self.dut.build_dpdk_apps("./examples/vm_power_manager") + out = self.sut_node.build_dpdk_apps("./examples/vm_power_manager") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") @@ -35,7 +35,7 @@ class TestVmPowerManager(TestCase): self.vcpu_map = [] # start vm self.vm_name = "vm0" - self.vm = LibvirtKvm(self.dut, self.vm_name, self.suite_name) + self.vm = LibvirtKvm(self.sut_node, self.vm_name, self.suite_name) channels = [ { "path": "/tmp/powermonitor/%s.0" % self.vm_name, @@ -73,7 +73,7 @@ class TestVmPowerManager(TestCase): for channel in channels: self.vm.add_vm_virtio_serial_channel(**channel) - self.vm_dut = self.vm.start() + self.vm_sut = self.vm.start() # ping cpus cpus = self.vm.get_vm_cpu() @@ -81,21 +81,21 @@ class TestVmPowerManager(TestCase): self.core_num = len(cpus) # build guest cli - out = self.vm_dut.build_dpdk_apps("examples/vm_power_manager/guest_cli") + out = self.vm_sut.build_dpdk_apps("examples/vm_power_manager/guest_cli") self.verify("Error" not in out, "Compilation error") self.verify("No such" not in out, "Compilation error") - self.path = self.vm_dut.apps_name["guest_cli"] + self.path = self.vm_sut.apps_name["guest_cli"] self.guest_cmd = ( self.path + "-c 0xff -n 8 -- --vm-name=%s --vcpu-list=0,1,2,3,4,5,6,7" % self.vm_name ) - self.vm_power_dir = self.vm_dut.apps_name["vm_power_manager"] + self.vm_power_dir = 
self.vm_sut.apps_name["vm_power_manager"] mgr_cmd = self.vm_power_dir + "-c 0xfff -n 8" - out = self.dut.send_expect(mgr_cmd, "vmpower>", 120) - self.dut.send_expect("add_vm %s" % self.vm_name, "vmpower>") - self.dut.send_expect("add_channels %s all" % self.vm_name, "vmpower>") - vm_info = self.dut.send_expect("show_vm %s" % self.vm_name, "vmpower>") + out = self.sut_node.send_expect(mgr_cmd, "vmpower>", 120) + self.sut_node.send_expect("add_vm %s" % self.vm_name, "vmpower>") + self.sut_node.send_expect("add_channels %s all" % self.vm_name, "vmpower>") + vm_info = self.sut_node.send_expect("show_vm %s" % self.vm_name, "vmpower>") # performance measure self.frame_sizes = [128] @@ -113,12 +113,12 @@ class TestVmPowerManager(TestCase): Check power monitor channel connection """ # check Channels and vcpus - out = self.vm_dut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) - self.vm_dut.send_expect("quit", "# ") + out = self.vm_sut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) + self.vm_sut.send_expect("quit", "# ") def get_cpu_frequency(self, core_id): cpu_regex = ".*\nCore (\d+) frequency: (\d+)" - out = self.dut.send_expect("show_cpu_freq %s" % core_id, "vmpower>") + out = self.sut_node.send_expect("show_cpu_freq %s" % core_id, "vmpower>") m = re.match(cpu_regex, out) freq = -1 if m: @@ -130,10 +130,10 @@ class TestVmPowerManager(TestCase): """ Check host cpu frequency can scale down in VM """ - out = self.vm_dut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) + out = self.vm_sut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) for vcpu in range(self.core_num): - self.vm_dut.send_expect("set_cpu_freq %d max" % vcpu, "vmpower\(guest\)>") + self.vm_sut.send_expect("set_cpu_freq %d max" % vcpu, "vmpower\(guest\)>") for vcpu in range(self.core_num): # map between host cpu and guest cpu @@ -143,7 +143,7 @@ class TestVmPowerManager(TestCase): for loop in range(len(freqs) - 1): # connect vm power host and guest - self.vm_dut.send_expect( + 
self.vm_sut.send_expect( "set_cpu_freq %d down" % vcpu, "vmpower\(guest\)>" ) cur_freq = self.get_cpu_frequency(self.vcpu_map[vcpu]) @@ -151,23 +151,23 @@ class TestVmPowerManager(TestCase): self.verify(ori_freq > cur_freq, "Cpu freqenecy can not scale down") ori_freq = cur_freq - self.vm_dut.send_expect("quit", "# ") + self.vm_sut.send_expect("quit", "# ") def test_vm_power_managment_frequp(self): """ Check host cpu frequency can scale up in VM """ - out = self.vm_dut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) + out = self.vm_sut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) for vcpu in range(self.core_num): - self.vm_dut.send_expect("set_cpu_freq %d min" % vcpu, "vmpower\(guest\)>") + self.vm_sut.send_expect("set_cpu_freq %d min" % vcpu, "vmpower\(guest\)>") for vcpu in range(self.core_num): ori_freq = self.get_cpu_frequency(self.vcpu_map[vcpu]) # get cpu frequencies range freqs = self.get_cpu_freqs(vcpu) for loop in range(len(freqs) - 1): - self.vm_dut.send_expect( + self.vm_sut.send_expect( "set_cpu_freq %d up" % vcpu, "vmpower\(guest\)>" ) cur_freq = self.get_cpu_frequency(self.vcpu_map[vcpu]) @@ -175,51 +175,51 @@ class TestVmPowerManager(TestCase): self.verify(cur_freq > ori_freq, "Cpu freqenecy can not scale up") ori_freq = cur_freq - self.vm_dut.send_expect("quit", "# ") + self.vm_sut.send_expect("quit", "# ") def test_vm_power_managment_freqmax(self): """ Check host cpu frequency can scale to max in VM """ - out = self.vm_dut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) + out = self.vm_sut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) max_freq_path = ( "cat /sys/devices/system/cpu/cpu%s/cpufreq/" + "cpuinfo_max_freq" ) for vcpu in range(self.core_num): - self.vm_dut.send_expect("set_cpu_freq %d max" % vcpu, "vmpower\(guest\)>") + self.vm_sut.send_expect("set_cpu_freq %d max" % vcpu, "vmpower\(guest\)>") freq = self.get_cpu_frequency(self.vcpu_map[vcpu]) - out = self.dut.alt_session.send_expect( + out = 
self.sut_node.alt_session.send_expect( max_freq_path % self.vcpu_map[vcpu], "# " ) max_freq = int(out) self.verify(freq == max_freq, "Cpu max frequency not correct") print((utils.GREEN("After frequency max, freq is %d\n" % max_freq))) - self.vm_dut.send_expect("quit", "# ") + self.vm_sut.send_expect("quit", "# ") def test_vm_power_managment_freqmin(self): """ Check host cpu frequency can scale to min in VM """ - out = self.vm_dut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) + out = self.vm_sut.send_expect(self.guest_cmd, "vmpower\(guest\)>", 120) min_freq_path = ( "cat /sys/devices/system/cpu/cpu%s/cpufreq/" + "cpuinfo_min_freq" ) for vcpu in range(self.core_num): - self.vm_dut.send_expect("set_cpu_freq %d min" % vcpu, "vmpower\(guest\)>") + self.vm_sut.send_expect("set_cpu_freq %d min" % vcpu, "vmpower\(guest\)>") freq = self.get_cpu_frequency(self.vcpu_map[vcpu]) - out = self.dut.alt_session.send_expect( + out = self.sut_node.alt_session.send_expect( min_freq_path % self.vcpu_map[vcpu], "# " ) min_freq = int(out) self.verify(freq == min_freq, "Cpu min frequency not correct") print((utils.GREEN("After frequency min, freq is %d\n" % min_freq))) - self.vm_dut.send_expect("quit", "# ") + self.vm_sut.send_expect("quit", "# ") def get_freq_in_transmission(self): self.cur_freq = self.get_cpu_frequency(self.vcpu_map[1]) @@ -228,14 +228,14 @@ class TestVmPowerManager(TestCase): def get_max_freq(self, core_num): freq_path = "cat /sys/devices/system/cpu/cpu%d/cpufreq/" + "cpuinfo_max_freq" - out = self.dut.alt_session.send_expect(freq_path % core_num, "# ") + out = self.sut_node.alt_session.send_expect(freq_path % core_num, "# ") freq = int(out) return freq def get_min_freq(self, core_num): freq_path = "cat /sys/devices/system/cpu/cpu%d/cpufreq/" + "cpuinfo_min_freq" - out = self.dut.alt_session.send_expect(freq_path % core_num, "# ") + out = self.sut_node.alt_session.send_expect(freq_path % core_num, "# ") freq = int(out) return freq @@ -245,7 +245,7 @@ class 
TestVmPowerManager(TestCase): + "scaling_available_frequencies" ) - out = self.dut.alt_session.send_expect(freq_path % core_num, "# ") + out = self.sut_node.alt_session.send_expect(freq_path % core_num, "# ") freqs = out.split() return freqs @@ -253,14 +253,14 @@ class TestVmPowerManager(TestCase): """ Run after each test case. """ - self.vm_dut.send_expect("quit", "# ") + self.vm_sut.send_expect("quit", "# ") pass def tear_down_all(self): """ Run after each test suite. """ - self.dut.send_expect("quit", "# ") + self.sut_node.send_expect("quit", "# ") self.vm.stop() - self.dut.virt_exit() + self.sut_node.virt_exit() pass diff --git a/tests/TestSuite_vm_pw_mgmt_policy.py b/tests/TestSuite_vm_pw_mgmt_policy.py index 2cb7a1f9..d048dc52 100644 --- a/tests/TestSuite_vm_pw_mgmt_policy.py +++ b/tests/TestSuite_vm_pw_mgmt_policy.py @@ -18,10 +18,10 @@ from itertools import product from pprint import pformat from framework.exception import VerifyFailure -from framework.packet import Packet -from framework.pktgen import TRANSMIT_CONT from framework.pmd_output import PmdOutput from framework.qemu_libvirt import LibvirtKvm +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.settings import TRANSMIT_CONT from framework.test_case import TestCase from framework.utils import create_mask as dts_create_mask @@ -37,23 +37,23 @@ class TestVmPwMgmtPolicy(TestCase): def target_dir(self): # get absolute directory of target source code target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir - def get_cores_mask(self, config="all", crb=None): - ports_socket = self.dut.get_numa_id(self.dut.get_ports()[0]) - mask = dts_create_mask(self.dut.get_core_list(config, socket=ports_socket)) + def get_cores_mask(self, config="all", node=None): + ports_socket = 
self.sut_node.get_numa_id(self.sut_node.get_ports()[0]) + mask = dts_create_mask(self.sut_node.get_core_list(config, socket=ports_socket)) return mask - def prepare_binary(self, name, host_crb=None): - _host_crb = host_crb if host_crb else self.dut + def prepare_binary(self, name, host_node=None): + _host_node = host_node if host_node else self.sut_node example_dir = "examples/" + name - out = _host_crb.build_dpdk_apps("./" + example_dir) + out = _host_node.build_dpdk_apps("./" + example_dir) return os.path.join( - self.target_dir, _host_crb.apps_name[os.path.basename(name)] + self.target_dir, _host_node.apps_name[os.path.basename(name)] ) def add_console(self, session): @@ -64,10 +64,10 @@ class TestVmPwMgmtPolicy(TestCase): def get_console(self, name): default_con_table = { - self.dut.session.name: [self.dut.send_expect, self.dut.get_session_output], - self.dut.alt_session.name: [ - self.dut.alt_session.send_expect, - self.dut.alt_session.session.get_output_all, + self.sut_node.session.name: [self.sut_node.send_expect, self.sut_node.get_session_output], + self.sut_node.alt_session.name: [ + self.sut_node.alt_session.send_expect, + self.sut_node.alt_session.session.get_output_all, ], } if name not in default_con_table: @@ -75,7 +75,7 @@ class TestVmPwMgmtPolicy(TestCase): else: return default_con_table.get(name) - def execute_cmds(self, cmds, name="dut"): + def execute_cmds(self, cmds, name="sut"): console, msg_pipe = self.get_console(name) if len(cmds) == 0: return @@ -113,19 +113,19 @@ class TestVmPwMgmtPolicy(TestCase): return outputs def d_con(self, cmds): - return self.execute_cmds(cmds, name=self.dut.session.name) + return self.execute_cmds(cmds, name=self.sut_node.session.name) def d_a_con(self, cmds): - return self.execute_cmds(cmds, name=self.dut.alt_session.name) + return self.execute_cmds(cmds, name=self.sut_node.alt_session.name) def vm_con(self, cmds): - return self.execute_cmds(cmds, name=self.vm_dut.session.name) + return self.execute_cmds(cmds, 
name=self.vm_sut.session.name) def vm_g_con(self, cmds): return self.execute_cmds(cmds, name=self.guest_con_name) def config_stream(self, stm_names=None): - dmac = self.vm_dut.get_mac_address(0) + dmac = self.vm_sut.get_mac_address(0) # set streams for traffic pkt_configs = { "UDP_1": { @@ -145,11 +145,11 @@ class TestVmPwMgmtPolicy(TestCase): values = pkt_configs[stm_name] pkt_type = values.get("type") pkt_layers = values.get("pkt_layers") - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - streams.append(pkt.pktgen.pkt) - self.logger.debug(pkt.pktgen.pkt.command()) + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + streams.append(scapy_pkt_builder.scapy_pkt_util.pkt) + self.logger.debug(scapy_pkt_builder.scapy_pkt_util.pkt.command()) return streams @@ -158,8 +158,8 @@ class TestVmPwMgmtPolicy(TestCase): for pkt in send_pkt: _option = deepcopy(option) _option["pcap"] = pkt - stream_id = self.tester.pktgen.add_stream(txport, rxport, pkt) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(txport, rxport, pkt) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) return stream_ids @@ -169,8 +169,8 @@ class TestVmPwMgmtPolicy(TestCase): rate_percent = option.get("rate_percent", float(100)) send_pkt = option.get("stream") or [] # clear streams before add new streams - self.tester.pktgen.clear_streams() - # set stream into pktgen + self.tg_node.perf_tg.clear_streams() + # set stream into traffic generator stream_option = { "stream_config": { "txmode": {}, @@ -181,8 +181,8 @@ class TestVmPwMgmtPolicy(TestCase): stream_ids = self.add_stream_to_pktgen(txport, rxport, send_pkt, stream_option) # run traffic options traffic_opt = option.get("traffic_opt") - # run pktgen traffic - result = self.tester.pktgen.measure(stream_ids, traffic_opt) + # run traffic 
generator traffic + result = self.tg_node.perf_tg.measure(stream_ids, traffic_opt) return result @@ -193,16 +193,16 @@ class TestVmPwMgmtPolicy(TestCase): return rate_percent def run_traffic(self, option): - dut_port = self.dut_ports[self.used_port] - tester_tx_port_id = self.tester.get_local_port(dut_port) - tester_rx_port_id = self.tester.get_local_port(dut_port) + sut_port = self.sut_ports[self.used_port] + tg_tx_port_id = self.tg_node.get_local_port(sut_port) + tg_rx_port_id = self.tg_node.get_local_port(sut_port) stm_type = option.get("stm_types") pps = option.get("pps") rate = self.get_rate_percent(pps) duration = option.get("duration", None) or 15 ports_topo = { - "tx_intf": tester_tx_port_id, - "rx_intf": tester_rx_port_id, + "tx_intf": tg_tx_port_id, + "rx_intf": tg_rx_port_id, "stream": self.config_stream(stm_type), "rate_percent": rate, "traffic_opt": { @@ -218,7 +218,7 @@ class TestVmPwMgmtPolicy(TestCase): return result def bind_ports_to_sys(self): - for port in self.dut.ports_info: + for port in self.sut_node.ports_info: netdev = port.get("port") if not netdev: continue @@ -231,7 +231,7 @@ class TestVmPwMgmtPolicy(TestCase): def bind_ports_to_dpdk(self, driver): if not driver: return - for port in self.dut.ports_info: + for port in self.sut_node.ports_info: netdev = port.get("port") if not netdev: continue @@ -244,7 +244,7 @@ class TestVmPwMgmtPolicy(TestCase): self.vm = ( self.vcpu_map ) = ( - self.vm_dut + self.vm_sut ) = ( self.guest_session ) = self.is_guest_on = self.is_vm_on = self.is_vf_set = None @@ -260,17 +260,17 @@ class TestVmPwMgmtPolicy(TestCase): self.d_a_con(cmd) def create_vf(self, driver="default"): - self.dut.generate_sriov_vfs_by_port(self.used_port, 1, driver=driver) + self.sut_node.generate_sriov_vfs_by_port(self.used_port, 1, driver=driver) self.is_vf_set = True - sriov_vfs_port = self.dut.ports_info[self.used_port]["vfs_port"] + sriov_vfs_port = self.sut_node.ports_info[self.used_port]["vfs_port"] return sriov_vfs_port[0].pci 
def destroy_vf(self): if not self.is_vf_set: return - self.dut.destroy_sriov_vfs_by_port(self.used_port) + self.sut_node.destroy_sriov_vfs_by_port(self.used_port) self.is_vf_set = False - port = self.dut.ports_info[self.used_port]["port"] + port = self.sut_node.ports_info[self.used_port]["port"] port.bind_driver() def add_nic_device(self, pci_addr, vm_inst): @@ -286,7 +286,7 @@ class TestVmPwMgmtPolicy(TestCase): # set vm initialize parameters self.init_vms_params() # start vm - self.vm = LibvirtKvm(self.dut, self.vm_name, self.suite_name) + self.vm = LibvirtKvm(self.sut_node, self.vm_name, self.suite_name) # pass vf to virtual machine pci_addr = self.create_vf() self.add_nic_device(pci_addr, self.vm) @@ -302,10 +302,10 @@ class TestVmPwMgmtPolicy(TestCase): # set vm default driver self.vm.def_driver = "vfio-pci" # boot up vm - self.vm_dut = self.vm.start() + self.vm_sut = self.vm.start() self.is_vm_on = True - self.verify(self.vm_dut, "create vm_dut fail !") - self.add_console(self.vm_dut.session) + self.verify(self.vm_sut, "create vm_sut fail !") + self.add_console(self.vm_sut.session) # get virtual machine cpu cores _vcpu_map = self.vm.get_vm_cpu() self.vcpu_map = [int(item) for item in _vcpu_map] @@ -314,12 +314,12 @@ class TestVmPwMgmtPolicy(TestCase): # close vm if self.is_vm_on: if self.guest_session: - self.vm_dut.close_session(self.guest_session) + self.vm_sut.close_session(self.guest_session) self.guest_session = None self.vm.stop() self.is_vm_on = False self.vm = None - self.dut.virt_exit() + self.sut_node.virt_exit() cmd_fmt = "virsh {0} {1} > /dev/null 2>&1".format cmds = [ [cmd_fmt("shutdown", self.vm_name), "# "], @@ -337,7 +337,7 @@ class TestVmPwMgmtPolicy(TestCase): eal_option = ("-v " "-c {core_mask} " "-n {mem_channel} ").format( **{ "core_mask": self.get_cores_mask("1S/12C/1T"), - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), } ) prompt = "vmpower>" @@ -364,9 +364,9 @@ class 
TestVmPwMgmtPolicy(TestCase): def init_guest_mgr(self): name = "vm_power_manager/guest_cli" - self.guest_cli = self.prepare_binary(name, host_crb=self.vm_dut) - self.guest_con_name = "_".join([self.vm_dut.NAME, name.replace("/", "-")]) - self.guest_session = self.vm_dut.create_session(self.guest_con_name) + self.guest_cli = self.prepare_binary(name, host_node=self.vm_sut) + self.guest_con_name = "_".join([self.vm_sut.NAME, name.replace("/", "-")]) + self.guest_session = self.vm_sut.create_session(self.guest_con_name) self.add_console(self.guest_session) def start_guest_mgr(self, cmd_option): @@ -380,7 +380,7 @@ class TestVmPwMgmtPolicy(TestCase): ).format( **{ "core_mask": "0xff", - "memory_channel": self.vm_dut.get_memory_channels(), + "memory_channel": self.vm_sut.get_memory_channels(), "file_prefix": "vmpower2", } ) + cmd_option @@ -410,7 +410,7 @@ class TestVmPwMgmtPolicy(TestCase): self.is_guest_on = False def init_vm_testpmd(self): - self.vm_testpmd = PmdOutput(self.vm_dut) + self.vm_testpmd = PmdOutput(self.vm_sut) def start_vm_testpmd(self): eal_param = ("-v " "-m {memsize} " "--file-prefix={file-prefix}").format( @@ -448,7 +448,7 @@ class TestVmPwMgmtPolicy(TestCase): random_index = random.randint(0, len(time_stage) - 1) timestamp = time_stage[random_index] ori_sys_time = datetime.now() - msg = "dut system original time is {0}".format(ori_sys_time) + msg = "SUT system original time is {0}".format(ori_sys_time) self.logger.debug(msg) # set system time to a desired time for policy msg = "set timestamp {0}".format(timestamp) @@ -468,7 +468,7 @@ class TestVmPwMgmtPolicy(TestCase): self.logger.info(msg) # get begin time stamp pre_time = datetime.now() - # when dut/tester are on the same node, separate into two timestamp + # when SUT/TG are on the same node, separate into two timestamp return pre_time, ori_sys_time def restore_system_time(self, pre_time, ori_sys_time): @@ -504,7 +504,7 @@ class TestVmPwMgmtPolicy(TestCase): "scaling_min_freq", ] freq = 
"/sys/devices/system/cpu/cpu{0}/cpufreq/{1}".format - cpu_topos = self.dut.get_all_cores() + cpu_topos = self.sut_node.get_all_cores() cpu_info = {} for cpu_topo in cpu_topos: cpu_id = int(cpu_topo.get("thread")) @@ -570,9 +570,9 @@ class TestVmPwMgmtPolicy(TestCase): """ status: enable_turbo | disable_turbo """ - dut_core_index = self.vcpu_map[vcpu] + sut_core_index = self.vcpu_map[vcpu] self.guest_set_vm_turbo_status(vcpu, status) - return int(dut_core_index) + return int(sut_core_index) def get_expected_turbo_freq(self, core_index, status="disable"): info = self.cpu_info.get(core_index, {}) @@ -580,32 +580,32 @@ class TestVmPwMgmtPolicy(TestCase): expected_freq = value[-2] if status == "disable" else value[-1] return expected_freq - def check_dut_core_turbo_enable(self, vcpu): - dut_core_index = self.set_single_core_turbo(vcpu, "enable_turbo") - cur_freq = self.get_linux_cpu_attrs(dut_core_index) - expected_freq = self.get_expected_turbo_freq(dut_core_index, "enable") + def check_sut_core_turbo_enable(self, vcpu): + sut_core_index = self.set_single_core_turbo(vcpu, "enable_turbo") + cur_freq = self.get_linux_cpu_attrs(sut_core_index) + expected_freq = self.get_expected_turbo_freq(sut_core_index, "enable") if cur_freq != expected_freq: msg = ( "core <{0}> turbo status: cur frequency is <{1}> " "not as expected frequency <{2}>" - ).format(dut_core_index, cur_freq, expected_freq) + ).format(sut_core_index, cur_freq, expected_freq) raise VerifyFailure(msg) self.logger.info( - "core <{0}> turbo status set successful".format(dut_core_index) + "core <{0}> turbo status set successful".format(sut_core_index) ) - def check_dut_core_turbo_disable(self, vcpu): - dut_core_index = self.set_single_core_turbo(vcpu, "disable_turbo") - cur_freq = self.get_linux_cpu_attrs(dut_core_index) - expected_freq = self.get_expected_turbo_freq(dut_core_index, "disable") + def check_sut_core_turbo_disable(self, vcpu): + sut_core_index = self.set_single_core_turbo(vcpu, "disable_turbo") + 
cur_freq = self.get_linux_cpu_attrs(sut_core_index) + expected_freq = self.get_expected_turbo_freq(sut_core_index, "disable") if cur_freq != expected_freq: msg = ( "core <{0}> turbo status: cur frequency is <{1}> " "not as expected frequency <{2}>" - ).format(dut_core_index, cur_freq, expected_freq) + ).format(sut_core_index, cur_freq, expected_freq) raise VerifyFailure(msg) self.logger.info( - "core <{0}> turbo status disable successful".format(dut_core_index) + "core <{0}> turbo status disable successful".format(sut_core_index) ) def get_expected_freq(self, core_index, check_item): @@ -852,7 +852,7 @@ class TestVmPwMgmtPolicy(TestCase): try: self.run_test_pre("turbo") self.start_guest_mgr(test_content.get("option")) - check_func = getattr(self, "check_dut_core_turbo_{}".format(status)) + check_func = getattr(self, "check_sut_core_turbo_{}".format(status)) check_func(0) except Exception as e: self.logger.error(traceback.format_exc()) @@ -869,7 +869,7 @@ class TestVmPwMgmtPolicy(TestCase): def verify_power_driver(self): expected_drv = "acpi-cpufreq" power_drv = self.get_sys_power_driver() - msg = "{0} should work with {1} driver on DUT".format( + msg = "{0} should work with {1} driver on SUT".format( self.suite_name, expected_drv ) self.verify(power_drv == expected_drv, msg) @@ -879,14 +879,14 @@ class TestVmPwMgmtPolicy(TestCase): cmd = "whereis {} > /dev/null 2>&1; echo $?".format(name) output = self.d_a_con(cmd) status = True if output and output.strip() == "0" else False - msg = "<{}> tool have not installed on DUT".format(name) + msg = "<{}> tool have not installed on SUT".format(name) self.verify(status, msg) def preset_test_environment(self): self.is_mgr_on = self.is_pmd_on = None self.ext_con = {} # get cpu cores information - self.dut.init_core_list_uncached_linux() + self.sut_node.init_core_list_uncached_linux() self.cpu_info = self.get_all_cpu_attrs() # port management self.cur_drv = self.bind_ports_to_sys() @@ -914,8 +914,8 @@ class 
TestVmPwMgmtPolicy(TestCase): """ Run at the start of each test suite. """ - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 1, "Not enough ports") + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 1, "Not enough ports") self.verify_cpupower_tool() self.verify_power_driver() # prepare testing environment @@ -938,9 +938,9 @@ class TestVmPwMgmtPolicy(TestCase): """ Run after each test case. """ - self.dut.send_expect("systemctl restart chronyd", "# ") - self.vm_dut.kill_all() - self.dut.kill_all() + self.sut_node.send_expect("systemctl restart chronyd", "# ") + self.vm_sut.kill_all() + self.sut_node.kill_all() def test_perf_turbo_enable(self): """ diff --git a/tests/TestSuite_vmdq.py b/tests/TestSuite_vmdq.py index 280d54bb..3f7efa3d 100644 --- a/tests/TestSuite_vmdq.py +++ b/tests/TestSuite_vmdq.py @@ -13,9 +13,9 @@ import re from time import sleep import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestVmdq(TestCase): @@ -24,19 +24,19 @@ class TestVmdq(TestCase): Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) - self.dut.build_install_dpdk(self.target) - # out = self.dut.send_expect("make -C examples/vmdq", "#", 10) - out = self.dut.build_dpdk_apps("examples/vmdq") + self.sut_node.build_install_dpdk(self.target) + # out = self.sut_node.send_expect("make -C examples/vmdq", "#", 10) + out = self.sut_node.build_dpdk_apps("examples/vmdq") self.verify("Error" not in out, "Compilation error") - self.app_vmdq_path = self.dut.apps_name["vmdq"] + self.app_vmdq_path = self.sut_node.apps_name["vmdq"] self.frame_size = 64 self.header_size = HEADER_SIZE["ip"] + HEADER_SIZE["eth"] - self.destmac_port = ["52:54:00:12:0%d:00" % i for i in self.dut_ports] + self.destmac_port = ["52:54:00:12:0%d:00" % i for i in self.sut_ports] self.core_configs = [] self.core_configs.append({"cores": "1S/1C/1T", "mpps": {}}) self.core_configs.append({"cores": "1S/2C/1T", "mpps": {}}) @@ -64,7 +64,7 @@ class TestVmdq(TestCase): cur_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() self.prios = range(8) def set_up(self): @@ -77,15 +77,15 @@ class TestVmdq(TestCase): """ Prepare the commandline and start vmdq app """ - core_list = self.dut.get_core_list(core_config, socket=self.ports_socket) + core_list = self.sut_node.get_core_list(core_config, socket=self.ports_socket) self.verify(core_list is not None, "Requested cores failed") core_mask = utils.create_mask(core_list) - port_mask = utils.create_mask(self.dut_ports) + port_mask = 
utils.create_mask(self.sut_ports) eal_param = "" - for i in self.dut_ports: - eal_param += " -a %s" % self.dut.ports_info[i]["pci"] + for i in self.sut_ports: + eal_param += " -a %s" % self.sut_node.ports_info[i]["pci"] # Run the application - self.dut.send_expect( + self.sut_node.send_expect( "./%s -c %s -n 4 %s -- -p %s --nb-pools %s --enable-rss" % (self.app_vmdq_path, core_mask, eal_param, port_mask, str(npools)), "reading queues", @@ -96,8 +96,8 @@ class TestVmdq(TestCase): """ create streams for ports. """ - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) tgen_input = [] for prio in prios: pcap = os.sep.join( @@ -112,15 +112,15 @@ class TestVmdq(TestCase): """ payload = self.frame_size - self.header_size for prio in prios: - self.tester.scapy_append( + self.tg_node.scapy_append( 'flows = [Ether(dst="%s")/Dot1Q(vlan=0,prio=%d)/IP(src="1.2.3.4", dst="1.1.1.1")/("X"*%d)]' % (self.destmac_port[0], prio, payload) ) pcap = os.sep.join( [self.output_path, "%s%d.pcap" % (self.suite_name, prio)] ) - self.tester.scapy_append('wrpcap("%s", flows)' % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", flows)' % pcap) + self.tg_node.scapy_execute() def verify_all_vmdq_stats(self): """ @@ -140,12 +140,12 @@ class TestVmdq(TestCase): ) def get_vmdq_stats(self): - vmdq_session = self.dut.new_session() - app_name = self.dut.apps_name["vmdq"].split("/")[-1] + vmdq_session = self.sut_node.new_session() + app_name = self.sut_node.apps_name["vmdq"].split("/")[-1] vmdq_session.send_expect( "kill -s SIGHUP `pgrep -fl %s | awk '{print $1}'`" % app_name, "#", 20 ) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.logger.info(out) vmdq_session.close() return out @@ -163,13 +163,13 @@ class TestVmdq(TestCase): vm_config = 
self.set_fields(self.pools, self.pools) # Start traffic transmission using approx 10% of line rate. ratePercent = 10 - # run packet generator + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, ratePercent, vm_config, self.tester.pktgen + tgen_input, ratePercent, vm_config, self.tg_node.perf_tg ) # set traffic option options = {"duration": 15} - loss = self.tester.pktgen.measure_loss(stream_ids=streams, options=options) + loss = self.tg_node.perf_tg.measure_loss(stream_ids=streams, options=options) self.logger.info( "loss is [loss rate, SendNumbers, ReceNumbers]{}!".format(loss) ) @@ -183,23 +183,23 @@ class TestVmdq(TestCase): def create_throughput_traffic(self, frame_size): payload = frame_size - self.header_size tgen_Input = [] - for _port in self.dut_ports: - if _port % len(self.dut_ports) == 0 or len(self.dut_ports) % _port == 2: - txIntf = self.tester.get_local_port(self.dut_ports[_port + 1]) + for _port in self.sut_ports: + if _port % len(self.sut_ports) == 0 or len(self.sut_ports) % _port == 2: + txIntf = self.tg_node.get_local_port(self.sut_ports[_port + 1]) dst_port = _port + 1 else: - txIntf = self.tester.get_local_port(self.dut_ports[_port - 1]) + txIntf = self.tg_node.get_local_port(self.sut_ports[_port - 1]) dst_port = _port - 1 - rxIntf = self.tester.get_local_port(self.dut_ports[_port]) - self.tester.scapy_append( + rxIntf = self.tg_node.get_local_port(self.sut_ports[_port]) + self.tg_node.scapy_append( 'flows = [Ether(dst="%s")/Dot1Q(vlan=0)/IP(src="1.2.3.4", dst="1.1.1.1")/("X"*%d)]' % (self.destmac_port[dst_port], payload) ) pcap = os.sep.join( [self.output_path, "%s-%d.pcap" % (self.suite_name, _port)] ) - self.tester.scapy_append('wrpcap("%s", flows)' % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", flows)' % pcap) + self.tg_node.scapy_execute() tgen_Input.append((txIntf, rxIntf, pcap)) return tgen_Input @@ -210,7 +210,7 @@ class TestVmdq(TestCase): frame_sizes = [64, 
128, 256, 512, 1024, 1280, 1518] for config in self.core_configs: self.logger.info(config["cores"]) - self.dut.kill_all() + self.sut_node.kill_all() core_config = config["cores"] self.start_application(self.pools, core_config) self.logger.info("Waiting for application to initialize") @@ -219,13 +219,13 @@ class TestVmdq(TestCase): self.logger.info(str(frame_size)) tgen_input = self.create_throughput_traffic(frame_size) # clear streams before add new streams - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() vm_config = self.set_fields(self.pools, self.pools) - # run packet generator + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, vm_config, self.tester.pktgen + tgen_input, 100, vm_config, self.tg_node.perf_tg ) - _, pps = self.tester.pktgen.measure_throughput(stream_ids=streams) + _, pps = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) self.verify(pps > 0, "No traffic detected") config["mpps"][frame_size] = pps / 1000000.0 # Print results @@ -254,11 +254,11 @@ class TestVmdq(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ Run after each test suite. """ # resume setting - self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) diff --git a/tests/TestSuite_vmdq_dcb.py b/tests/TestSuite_vmdq_dcb.py index 03088045..d097c833 100644 --- a/tests/TestSuite_vmdq_dcb.py +++ b/tests/TestSuite_vmdq_dcb.py @@ -13,9 +13,9 @@ import random import re import framework.utils as utils -from framework.pktgen import PacketGeneratorHelper from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestVmdqDcb(TestCase): @@ -23,9 +23,9 @@ class TestVmdqDcb(TestCase): """ Run at the start of each test suite. 
""" - self.dut_ports = self.dut.get_ports(self.nic) - self.verify(len(self.dut_ports) >= 2, "Insufficient ports") - self.socket = self.dut.get_numa_id(self.dut_ports[0]) + self.sut_ports = self.sut_node.get_ports(self.nic) + self.verify(len(self.sut_ports) >= 2, "Insufficient ports") + self.socket = self.sut_node.get_numa_id(self.sut_ports[0]) self.frame_size = 64 self.destmac_port = "52:54:00:12:00:00" @@ -37,7 +37,7 @@ class TestVmdqDcb(TestCase): self.output_path = os.sep.join([cur_path, self.logger.log_path]) # create an instance to set stream field setting - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() self.prios = range(8) self.create_pcaps(self.prios) @@ -56,36 +56,36 @@ class TestVmdqDcb(TestCase): """ Build example "Vmdq_dcb". """ - out = self.dut.build_dpdk_apps("examples/vmdq_dcb") + out = self.sut_node.build_dpdk_apps("examples/vmdq_dcb") self.verify("Error" not in out, "Compilation error") def rebuild_dpdk(self, nb_queue_per_vm=4): """ Rebuild dpdk """ - self.dut.set_build_options( + self.sut_node.set_build_options( {"RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM": nb_queue_per_vm} ) - self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) def start_application(self, npools, ntcs): """ Prepare the commandline and start vmdq_dcb app """ - core_list = self.dut.get_core_list("1S/%dC/1T" % ntcs, socket=self.socket) + core_list = self.sut_node.get_core_list("1S/%dC/1T" % ntcs, socket=self.socket) self.verify(core_list is not None, "Requested %d cores failed" % ntcs) core_mask = utils.create_mask(core_list) - port_mask = utils.create_mask(self.dut_ports) + port_mask = utils.create_mask(self.sut_ports) eal_param = "" - for i in self.dut_ports: - eal_param += " -a %s" % self.dut.ports_info[i]["pci"] + for i in self.sut_ports: + eal_param += " -a %s" % self.sut_node.ports_info[i]["pci"] # Run the application - app_name = self.dut.apps_name["vmdq_dcb"] + app_name = 
self.sut_node.apps_name["vmdq_dcb"] command = ( app_name + "-c %s -n 4 %s -- -p %s --nb-pools %s --nb-tcs %s " "--enable-rss" % (core_mask, eal_param, port_mask, str(npools), str(ntcs)) ) - self.dut.send_expect(command, "reading queues", 120) + self.sut_node.send_expect(command, "reading queues", 120) def create_pcaps(self, prios): """ @@ -93,19 +93,19 @@ class TestVmdqDcb(TestCase): """ payload = self.frame_size - HEADER_SIZE["ip"] - HEADER_SIZE["eth"] for prio in prios: - self.tester.scapy_append( + self.tg_node.scapy_append( 'flows = [Ether(dst="%s")/Dot1Q(vlan=0,prio=%d)/IP(src="1.2.3.4", dst="1.1.1.1")/("X"*%d)]' % (self.destmac_port, prio, payload) ) pcap = os.sep.join( [self.output_path, "%s%d.pcap" % (self.suite_name, prio)] ) - self.tester.scapy_append('wrpcap("%s", flows)' % pcap) - self.tester.scapy_execute() + self.tg_node.scapy_append('wrpcap("%s", flows)' % pcap) + self.tg_node.scapy_execute() def get_tgen_input(self, prios): - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) tgen_input = [] for prio in prios: pcap = os.sep.join( @@ -128,12 +128,12 @@ class TestVmdqDcb(TestCase): return fields_config def get_vmdq_stats(self): - vmdq_dcb_session = self.dut.new_session() - app_name = self.dut.apps_name["vmdq_dcb"].split("/")[-1] + vmdq_dcb_session = self.sut_node.new_session() + app_name = self.sut_node.apps_name["vmdq_dcb"].split("/")[-1] vmdq_dcb_session.send_expect( "kill -s SIGHUP `pgrep -fl %s | awk '{print $1}'`" % app_name, "#", 20 ) - out = self.dut.get_session_output() + out = self.sut_node.get_session_output() self.logger.info(out) return out @@ -163,16 +163,16 @@ class TestVmdqDcb(TestCase): # Transmit traffic tgen_input = self.get_tgen_input(self.prios) vm_config = self.set_fields(npools, npools) - self.tester.pktgen.clear_streams() + 
self.tg_node.perf_tg.clear_streams() # Start traffic transmission using approx 10% of line rate. ratePercent = 50 - # run packet generator + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, ratePercent, vm_config, self.tester.pktgen + tgen_input, ratePercent, vm_config, self.tg_node.perf_tg ) # set traffic option options = {"duration": 15} - loss = self.tester.pktgen.measure_loss(stream_ids=streams, options=options) + loss = self.tg_node.perf_tg.measure_loss(stream_ids=streams, options=options) self.logger.info( "loss is [loss rate, SendNumbers, ReceNumbers]{}!".format(loss) ) @@ -195,7 +195,7 @@ class TestVmdqDcb(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_vswitch_pvp_multi_paths_performance_with_cbdma.py b/tests/TestSuite_vswitch_pvp_multi_paths_performance_with_cbdma.py index 6ec364c1..b1bc8a66 100644 --- a/tests/TestSuite_vswitch_pvp_multi_paths_performance_with_cbdma.py +++ b/tests/TestSuite_vswitch_pvp_multi_paths_performance_with_cbdma.py @@ -14,11 +14,11 @@ from copy import deepcopy import framework.rst as rst import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import UPDATE_EXPECTED, load_global_setting from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): @@ -27,33 +27,33 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): Run at the start of each test suite. 
""" self.build_vhost_app() - self.dut_ports = self.dut.get_ports() + self.sut_ports = self.sut_node.get_ports() self.number_of_ports = 1 - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores = self.dut.get_core_list("all", socket=self.ports_socket) + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores = self.sut_node.get_core_list("all", socket=self.ports_socket) self.vhost_core_list = self.cores[0:2] self.vuser0_core_list = self.cores[2:4] self.vhost_core_mask = utils.create_mask(self.vhost_core_list) - self.mem_channels = self.dut.get_memory_channels() + self.mem_channels = self.sut_node.get_memory_channels() # get cbdma device self.cbdma_dev_infos = [] self.dmas_info = None self.device_str = None self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") - self.base_dir = self.dut.base_dir.replace("~", "/root") - txport = self.tester.get_local_port(self.dut_ports[0]) - self.txItf = self.tester.get_interface(txport) + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + txport = self.tg_node.get_local_port(self.sut_ports[0]) + self.txItf = self.tg_node.get_interface(txport) self.virtio_user0_mac = "00:11:22:33:44:10" self.vm_num = 2 - self.app_testpmd_path = self.dut.apps_name["test-pmd"] - self.pktgen_helper = PacketGeneratorHelper() - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user0 = self.dut.new_session(suite="virtio-user0") - self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] + self.pktgen_helper 
= TrafficGeneratorStream() + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user0 = self.sut_node.new_session(suite="virtio-user0") + self.virtio_user0_pmd = PmdOutput(self.sut_node, self.virtio_user0) self.frame_size = [64, 128, 256, 512, 1024, 1518] self.save_result_flag = True self.json_obj = {} @@ -62,10 +62,10 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): """ Run before each test case. """ - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.dut.send_expect("killall -I dpdk-vhost", "#", 20) - self.dut.send_expect("killall -I dpdk-testpmd", "#", 20) - self.dut.send_expect("killall -I qemu-system-x86_64", "#", 20) + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -I dpdk-vhost", "#", 20) + self.sut_node.send_expect("killall -I dpdk-testpmd", "#", 20) + self.sut_node.send_expect("killall -I qemu-system-x86_64", "#", 20) # Prepare the result table self.table_header = ["Frame"] @@ -91,11 +91,11 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): self.nb_desc = self.test_parameters[64][0] def build_vhost_app(self): - out = self.dut.build_dpdk_apps("./examples/vhost") + out = self.sut_node.build_dpdk_apps("./examples/vhost") self.verify("Error" not in out, "compilation vhost error") def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -104,7 +104,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): """ launch the vhost app on vhost side """ - self.app_path = self.dut.apps_name["vhost"] + self.app_path = self.sut_node.apps_name["vhost"] socket_file_param = "--socket-file ./vhost-net" allow_option = "" for item in allow_pci: @@ -162,7 +162,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): """ get all cbdma ports """ - out = self.dut.send_expect( + out = 
self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -190,7 +190,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): dmas_info += dmas self.dmas_info = dmas_info[:-1] self.device_str = " ".join(used_cbdma) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -199,11 +199,11 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -212,14 +212,14 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): def config_stream(self, frame_size): tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - pkt = Packet(pkt_type="UDP", pkt_len=frame_size) - pkt.config_layer("ether", {"dst": self.virtio_user0_mac}) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=frame_size) + scapy_pkt_builder.config_layer("ether", {"dst": self.virtio_user0_mac}) pcap = os.path.join( self.out_path, "vswitch_pvp_multi_path_%s.pcap" % (frame_size) ) - pkt.save_pcapfile(self.tester, pcap) + scapy_pkt_builder.save_pcapfile(self.tg_node, pcap) tgen_input.append((rx_port, tx_port, pcap)) return tgen_input @@ -231,14 +231,14 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): ) tgenInput = self.config_stream(frame_size) # clear streams before add new streams - 
self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"duration": 5} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) Mpps = pps / 1000000.0 @@ -375,7 +375,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -399,7 +399,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -421,7 +421,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -445,7 +445,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -467,7 
+467,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -491,7 +491,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -515,7 +515,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -537,7 +537,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -561,7 +561,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -583,7 +583,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): cbdma_num = 1 
self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) self.start_vhost_app(allow_pci=allow_pci) @@ -596,11 +596,11 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase): def close_all_session(self): if getattr(self, "vhost_user", None): - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) if getattr(self, "virtio-user0", None): - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.virtio_user0) if getattr(self, "virtio-user1", None): - self.dut.close_session(self.virtio_user1) + self.sut_node.close_session(self.virtio_user1) def tear_down(self): """ diff --git a/tests/TestSuite_vswitch_sample_cbdma.py b/tests/TestSuite_vswitch_sample_cbdma.py index 9fb6150b..631818f4 100644 --- a/tests/TestSuite_vswitch_sample_cbdma.py +++ b/tests/TestSuite_vswitch_sample_cbdma.py @@ -13,11 +13,11 @@ import string import time import framework.utils as utils -from framework.packet import Packet -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.settings import HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream from framework.virt_common import VM @@ -27,40 +27,40 @@ class TestVswitchSampleCBDMA(TestCase): Run at the start of each test suite. 
""" self.build_vhost_app() - self.tester_tx_port_num = 1 - self.dut_ports = self.dut.get_ports() - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - self.ports_socket = self.dut.get_numa_id(self.dut_ports[0]) - self.cores = self.dut.get_core_list("all", socket=self.ports_socket) + self.tg_tx_port_num = 1 + self.sut_ports = self.sut_node.get_ports() + self.verify(len(self.sut_ports) >= 1, "Insufficient ports for testing") + self.ports_socket = self.sut_node.get_numa_id(self.sut_ports[0]) + self.cores = self.sut_node.get_core_list("all", socket=self.ports_socket) self.vhost_core_list = self.cores[0:2] self.vuser0_core_list = self.cores[2:4] self.vuser1_core_list = self.cores[4:6] self.vhost_core_mask = utils.create_mask(self.vhost_core_list) - self.mem_channels = self.dut.get_memory_channels() + self.mem_channels = self.sut_node.get_memory_channels() # get cbdma device self.cbdma_dev_infos = [] self.dmas_info = None self.device_str = None self.out_path = "/tmp" - out = self.tester.send_expect("ls -d %s" % self.out_path, "# ") + out = self.tg_node.send_expect("ls -d %s" % self.out_path, "# ") if "No such file or directory" in out: - self.tester.send_expect("mkdir -p %s" % self.out_path, "# ") - self.base_dir = self.dut.base_dir.replace("~", "/root") - txport = self.tester.get_local_port(self.dut_ports[0]) - self.txItf = self.tester.get_interface(txport) + self.tg_node.send_expect("mkdir -p %s" % self.out_path, "# ") + self.base_dir = self.sut_node.base_dir.replace("~", "/root") + txport = self.tg_node.get_local_port(self.sut_ports[0]) + self.txItf = self.tg_node.get_interface(txport) self.virtio_dst_mac0 = "00:11:22:33:44:10" self.virtio_dst_mac1 = "00:11:22:33:44:11" self.vm_dst_mac0 = "52:54:00:00:00:01" self.vm_dst_mac1 = "52:54:00:00:00:02" self.vm_num = 2 - self.app_testpmd_path = self.dut.apps_name["test-pmd"] + self.app_testpmd_path = self.sut_node.apps_name["test-pmd"] # create an instance to set stream field setting - 
self.pktgen_helper = PacketGeneratorHelper() - self.vhost_user = self.dut.new_session(suite="vhost-user") - self.virtio_user0 = self.dut.new_session(suite="virtio-user0") - self.virtio_user1 = self.dut.new_session(suite="virtio-user1") - self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0) - self.virtio_user1_pmd = PmdOutput(self.dut, self.virtio_user1) + self.pktgen_helper = TrafficGeneratorStream() + self.vhost_user = self.sut_node.new_session(suite="vhost-user") + self.virtio_user0 = self.sut_node.new_session(suite="virtio-user0") + self.virtio_user1 = self.sut_node.new_session(suite="virtio-user1") + self.virtio_user0_pmd = PmdOutput(self.sut_node, self.virtio_user0) + self.virtio_user1_pmd = PmdOutput(self.sut_node, self.virtio_user1) self.mrg_rxbuf = 0 self.in_order = 0 self.vectorized = 0 @@ -75,20 +75,20 @@ class TestVswitchSampleCBDMA(TestCase): """ Run before each test case. """ - self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") - self.dut.send_expect("killall -I dpdk-vhost", "#", 20) - self.dut.send_expect("killall -I dpdk-testpmd", "#", 20) - self.dut.send_expect("killall -I qemu-system-x86_64", "#", 20) - self.vm_dut = [] + self.sut_node.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#") + self.sut_node.send_expect("killall -I dpdk-vhost", "#", 20) + self.sut_node.send_expect("killall -I dpdk-testpmd", "#", 20) + self.sut_node.send_expect("killall -I qemu-system-x86_64", "#", 20) + self.vm_sut = [] self.vm = [] def build_vhost_app(self): - out = self.dut.build_dpdk_apps("./examples/vhost") + out = self.sut_node.build_dpdk_apps("./examples/vhost") self.verify("Error" not in out, "compilation vhost error") @property def check_2M_env(self): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# " ) return True if out == "2048" else False @@ -99,11 +99,11 @@ class TestVswitchSampleCBDMA(TestCase): """ launch the vhost app on vhost side """ - 
self.app_path = self.dut.apps_name["vhost"] + self.app_path = self.sut_node.apps_name["vhost"] socket_file_param = "" for item in range(socket_num): socket_file_param += "--socket-file ./vhost-net{} ".format(item) - allow_pci = [self.dut.ports_info[0]["pci"]] + allow_pci = [self.sut_node.ports_info[0]["pci"]] for item in range(cbdma_num): allow_pci.append(self.cbdma_dev_infos[item]) allow_option = "" @@ -208,8 +208,8 @@ class TestVswitchSampleCBDMA(TestCase): if packed: setting_args = setting_args + ",packed=on" for i in range(self.vm_num): - vm_dut = None - vm_info = VM(self.dut, "vm%d" % i, "vhost_sample") + vm_sut = None + vm_info = VM(self.sut_node, "vm%d" % i, "vhost_sample") vm_params = {} vm_params["driver"] = "vhost-user" if server_mode: @@ -224,13 +224,13 @@ class TestVswitchSampleCBDMA(TestCase): vm_info.set_vm_device(**vm_params) time.sleep(3) try: - vm_dut = vm_info.start(set_target=set_target, bind_dev=bind_dev) - if vm_dut is None: + vm_sut = vm_info.start(set_target=set_target, bind_dev=bind_dev) + if vm_sut is None: raise Exception("Set up VM ENV failed") except Exception as e: print((utils.RED("Failure for %s" % str(e)))) raise e - self.vm_dut.append(vm_dut) + self.vm_sut.append(vm_sut) self.vm.append(vm_info) def start_vm_testpmd(self, pmd_session): @@ -241,19 +241,19 @@ class TestVswitchSampleCBDMA(TestCase): param = "--rxq=1 --txq=1 --nb-cores=1 --txd=1024 --rxd=1024" pmd_session.start_testpmd(cores=self.vm_cores, param=param) - def repeat_bind_driver(self, dut, repeat_times=50): + def repeat_bind_driver(self, sut, repeat_times=50): i = 0 while i < repeat_times: - dut.unbind_interfaces_linux() - dut.bind_interfaces_linux(driver="virtio-pci") - dut.bind_interfaces_linux(driver="vfio-pci") + sut.unbind_interfaces_linux() + sut.bind_interfaces_linux(driver="virtio-pci") + sut.bind_interfaces_linux(driver="vfio-pci") i += 1 def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num): """ get all cbdma ports """ - out = self.dut.send_expect( + out = 
self.sut_node.send_expect( "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30 ) device_info = out.split("\n") @@ -281,7 +281,7 @@ class TestVswitchSampleCBDMA(TestCase): dmas_info += dmas self.dmas_info = dmas_info[:-1] self.device_str = " ".join(used_cbdma) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=%s %s" % (self.drivername, self.device_str), "# ", @@ -292,26 +292,26 @@ class TestVswitchSampleCBDMA(TestCase): """ Send a vlan packet with vlan id 1000 """ - pkt = Packet(pkt_type="VLAN_UDP", pkt_len=pkt_size) - pkt.config_layer("ether", {"dst": dts_mac}) - pkt.config_layer("vlan", {"vlan": 1000}) - pkt.send_pkt(self.tester, tx_port=self.txItf, count=pkt_count) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP", pkt_len=pkt_size) + scapy_pkt_builder.config_layer("ether", {"dst": dts_mac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": 1000}) + scapy_pkt_builder.send_pkt(self.tg_node, tx_port=self.txItf, count=pkt_count) def verify_receive_packet(self, pmd_session, expected_pkt_count): out = pmd_session.execute_cmd("show port stats all") rx_num = re.compile("RX-packets: (.*?)\s+?").findall(out, re.S) self.verify( (int(rx_num[0]) >= int(expected_pkt_count)), - "Can't receive enough packets from tester", + "Can't receive enough packets from TG", ) def bind_cbdma_device_to_kernel(self): if self.device_str is not None: - self.dut.send_expect("modprobe ioatdma", "# ") - self.dut.send_expect( + self.sut_node.send_expect("modprobe ioatdma", "# ") + self.sut_node.send_expect( "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.device_str, "# ", @@ -320,17 +320,17 @@ class TestVswitchSampleCBDMA(TestCase): def config_stream(self, frame_size, dst_mac_list): tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = 
self.tester.get_local_port(self.dut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) for dst_mac in dst_mac_list: payload_size = frame_size - self.headers_size - pkt = Packet(pkt_type="VLAN_UDP", pkt_len=payload_size) - pkt.config_layer("ether", {"dst": dst_mac}) - pkt.config_layer("vlan", {"vlan": 1000}) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP", pkt_len=payload_size) + scapy_pkt_builder.config_layer("ether", {"dst": dst_mac}) + scapy_pkt_builder.config_layer("vlan", {"vlan": 1000}) pcap = os.path.join( self.out_path, "vswitch_sample_cbdma_%s_%s.pcap" % (dst_mac, frame_size) ) - pkt.save_pcapfile(self.tester, pcap) + scapy_pkt_builder.save_pcapfile(self.tg_node, pcap) tgen_input.append((rx_port, tx_port, pcap)) return tgen_input @@ -346,14 +346,14 @@ class TestVswitchSampleCBDMA(TestCase): ) tgenInput = self.config_stream(frame_size, dst_mac_list) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, None, self.tester.pktgen + tgenInput, 100, None, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"duration": 5} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) throughput = pps / 1000000.0 @@ -473,14 +473,14 @@ class TestVswitchSampleCBDMA(TestCase): def config_stream_imix(self, frame_sizes, dst_mac_list): tgen_input = [] - rx_port = self.tester.get_local_port(self.dut_ports[0]) - tx_port = self.tester.get_local_port(self.dut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[0]) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) for dst_mac in dst_mac_list: for frame_size in frame_sizes: payload_size = frame_size - self.headers_size - pkt = Packet() - 
pkt.assign_layers(["ether", "ipv4", "raw"]) - pkt.config_layers( + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.assign_layers(["ether", "ipv4", "raw"]) + scapy_pkt_builder.config_layers( [ ("ether", {"dst": "%s" % dst_mac}), ("ipv4", {"src": "1.1.1.1"}), @@ -491,7 +491,7 @@ class TestVswitchSampleCBDMA(TestCase): self.out_path, "vswitch_sample_cbdma_%s_%s.pcap" % (dst_mac, frame_size), ) - pkt.save_pcapfile(self.tester, pcap) + scapy_pkt_builder.save_pcapfile(self.tg_node, pcap) tgen_input.append((rx_port, tx_port, pcap)) return tgen_input @@ -508,14 +508,14 @@ class TestVswitchSampleCBDMA(TestCase): }, } # clear streams before add new streams - self.tester.pktgen.clear_streams() - # run packet generator + self.tg_node.perf_tg.clear_streams() + # run traffic generator streams = self.pktgen_helper.prepare_stream_from_tginput( - tgenInput, 100, fields_config, self.tester.pktgen + tgenInput, 100, fields_config, self.tg_node.perf_tg ) # set traffic option traffic_opt = {"delay": 5, "duration": 5} - _, pps = self.tester.pktgen.measure_throughput( + _, pps = self.tg_node.perf_tg.measure_throughput( stream_ids=streams, options=traffic_opt ) throughput = pps / 1000000.0 @@ -541,8 +541,8 @@ class TestVswitchSampleCBDMA(TestCase): out1 = self.virtio_user1_pmd.execute_cmd("show port stats all") rx_num0 = re.compile("RX-packets: (.*?)\s+?").findall(out0, re.S) rx_num1 = re.compile("RX-packets: (.*?)\s+?").findall(out1, re.S) - self.verify(int(rx_num0[0]) > 32, "virtio-user0 not receive pkts from tester") - self.verify(int(rx_num1[0]) > 32, "virtio-user1 not receive pkts from tester") + self.verify(int(rx_num0[0]) > 32, "virtio-user0 not receive pkts from TG") + self.verify(int(rx_num1[0]) > 32, "virtio-user1 not receive pkts from TG") return perf_result def test_perf_pvp_test_with_two_vm_and_two_cbdma_channels_using_vhost_async_driver( @@ -767,8 +767,8 @@ class TestVswitchSampleCBDMA(TestCase): bind_dev=True, vm_diff_param=True, ) - self.vm0_pmd = 
PmdOutput(self.vm_dut[0]) - self.vm1_pmd = PmdOutput(self.vm_dut[1]) + self.vm0_pmd = PmdOutput(self.vm_sut[0]) + self.vm1_pmd = PmdOutput(self.vm_sut[1]) self.start_vm_testpmd(self.vm0_pmd) self.start_vm_testpmd(self.vm1_pmd) self.set_testpmd0_param(self.vm0_pmd, self.vm_dst_mac1) @@ -793,16 +793,16 @@ class TestVswitchSampleCBDMA(TestCase): self.logger.info("After rebind VM Driver perf test") # repeat bind 50 time from virtio-pci to vfio-pci - self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50) - self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50) + self.repeat_bind_driver(sut=self.vm_sut[0], repeat_times=50) + self.repeat_bind_driver(sut=self.vm_sut[1], repeat_times=50) self.vhost_user.send_expect("^C", "# ", 20) self.start_vhost_app( with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True ) after_bind = self.start_vms_testpmd_and_test(need_start_vm=False) # repeat bind 50 time from virtio-pci to vfio-pci - self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50) - self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50) + self.repeat_bind_driver(sut=self.vm_sut[0], repeat_times=50) + self.repeat_bind_driver(sut=self.vm_sut[1], repeat_times=50) self.table_header = [ "Frame Size(Byte)", @@ -822,18 +822,18 @@ class TestVswitchSampleCBDMA(TestCase): """ set virtio device IP and run arp protocal """ - vm0_intf = self.vm_dut[0].ports_info[0]["intf"] - vm1_intf = self.vm_dut[1].ports_info[0]["intf"] - self.vm_dut[0].send_expect( + vm0_intf = self.vm_sut[0].ports_info[0]["intf"] + vm1_intf = self.vm_sut[1].ports_info[0]["intf"] + self.vm_sut[0].send_expect( "ifconfig %s %s" % (vm0_intf, self.virtio_ip0), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10 ) - self.vm_dut[0].send_expect( + self.vm_sut[0].send_expect( "arp -s %s %s" % (self.virtio_ip1, self.vm_dst_mac1), "#", 10 ) - self.vm_dut[1].send_expect( + self.vm_sut[1].send_expect( "arp -s %s %s" % 
(self.virtio_ip0, self.vm_dst_mac0), "#", 10 ) @@ -843,8 +843,8 @@ class TestVswitchSampleCBDMA(TestCase): """ iperf_server = "iperf -f g -s -i 1" iperf_client = "iperf -f g -c 1.1.1.2 -i 1 -t 60" - self.vm_dut[0].send_expect("%s > iperf_server.log &" % iperf_server, "", 10) - self.vm_dut[1].send_expect("%s > iperf_client.log &" % iperf_client, "", 60) + self.vm_sut[0].send_expect("%s > iperf_server.log &" % iperf_server, "", 10) + self.vm_sut[1].send_expect("%s > iperf_client.log &" % iperf_client, "", 60) time.sleep(90) def get_iperf_result(self): @@ -853,8 +853,8 @@ class TestVswitchSampleCBDMA(TestCase): """ self.table_header = ["Mode", "[M|G]bits/sec"] self.result_table_create(self.table_header) - self.vm_dut[0].send_expect("pkill iperf", "# ") - self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir) + self.vm_sut[0].send_expect("pkill iperf", "# ") + self.vm_sut[1].session.copy_file_from("%s/iperf_client.log" % self.sut_node.base_dir) fp = open("./iperf_client.log") fmsg = fp.read() fp.close() @@ -876,11 +876,11 @@ class TestVswitchSampleCBDMA(TestCase): results_row = ["vm2vm", iperfdata[-1]] self.result_table_add(results_row) - # print iperf resut + # print iperf result self.result_table_print() # rm the iperf log file in vm - self.vm_dut[0].send_expect("rm iperf_server.log", "#", 10) - self.vm_dut[1].send_expect("rm iperf_client.log", "#", 10) + self.vm_sut[0].send_expect("rm iperf_server.log", "#", 10) + self.vm_sut[1].send_expect("rm iperf_client.log", "#", 10) return float(iperfdata[-1].split()[0]) def check_scp_file_valid_between_vms(self, file_size=1024): @@ -891,17 +891,17 @@ class TestVswitchSampleCBDMA(TestCase): data = "" for char in range(file_size * 1024): data += random.choice(self.random_string) - self.vm_dut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ") + self.vm_sut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ") # scp this file to vm1 - out = self.vm_dut[1].send_command( + out = 
self.vm_sut[1].send_command( "scp root@%s:/tmp/payload /root" % self.virtio_ip0, timeout=5 ) if "Are you sure you want to continue connecting" in out: - self.vm_dut[1].send_command("yes", timeout=3) - self.vm_dut[1].send_command(self.vm[0].password, timeout=3) + self.vm_sut[1].send_command("yes", timeout=3) + self.vm_sut[1].send_command(self.vm[0].password, timeout=3) # get the file info in vm1, and check it valid - md5_send = self.vm_dut[0].send_expect("md5sum /tmp/payload", "# ") - md5_revd = self.vm_dut[1].send_expect("md5sum /root/payload", "# ") + md5_send = self.vm_sut[0].send_expect("md5sum /tmp/payload", "# ") + md5_revd = self.vm_sut[1].send_expect("md5sum /root/payload", "# ") md5_send = md5_send[: md5_send.find(" ")] md5_revd = md5_revd[: md5_revd.find(" ")] self.verify( @@ -919,8 +919,8 @@ class TestVswitchSampleCBDMA(TestCase): set_target=True, bind_dev=False, ) - self.vm0_pmd = PmdOutput(self.vm_dut[0]) - self.vm1_pmd = PmdOutput(self.vm_dut[1]) + self.vm0_pmd = PmdOutput(self.vm_sut[0]) + self.vm1_pmd = PmdOutput(self.vm_sut[1]) self.config_vm_env() self.check_scp_file_valid_between_vms() self.start_iperf_test() @@ -1001,17 +1001,17 @@ class TestVswitchSampleCBDMA(TestCase): def close_all_session(self): if getattr(self, "vhost_user", None): - self.dut.close_session(self.vhost_user) + self.sut_node.close_session(self.vhost_user) if getattr(self, "virtio-user0", None): - self.dut.close_session(self.virtio_user0) + self.sut_node.close_session(self.virtio_user0) if getattr(self, "virtio-user1", None): - self.dut.close_session(self.virtio_user1) + self.sut_node.close_session(self.virtio_user1) def tear_down(self): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() for i in range(len(self.vm)): self.vm[i].stop() self.vhost_user.send_expect("^C", "# ", 20) diff --git a/tests/TestSuite_vxlan.py b/tests/TestSuite_vxlan.py index 159e06b2..bb09f112 100644 --- a/tests/TestSuite_vxlan.py +++ b/tests/TestSuite_vxlan.py @@ -25,13 +25,12 @@ from scapy.route import * from scapy.sendrecv import sniff from scapy.utils import rdpcap, wrpcap -import framework.packet as packet +import framework.scapy_packet_builder as scapy_pkt_builder import framework.utils as utils -from framework.packet import IncreaseIP, IncreaseIPv6 -from framework.pktgen import PacketGeneratorHelper from framework.pmd_output import PmdOutput from framework.settings import FOLDERS, HEADER_SIZE from framework.test_case import TestCase +from framework.tg_perf import TrafficGeneratorStream # # @@ -55,7 +54,7 @@ class VxlanTestConfig(object): self.init() for name in kwargs: setattr(self, name, kwargs[name]) - self.pkt_obj = packet.Packet() + self.scapy_pkt_builder = scapy_pkt_builder.ScapyPacketBuilder() def init(self): self.packets_config() @@ -64,8 +63,8 @@ class VxlanTestConfig(object): """ Default vxlan packet format """ - self.pcap_file = packet.TMP_PATH + "vxlan.pcap" - self.capture_file = packet.TMP_PATH + "vxlan_capture.pcap" + self.pcap_file = scapy_pkt_builder.TMP_PATH + "vxlan.pcap" + self.capture_file = scapy_pkt_builder.TMP_PATH + "vxlan_capture.pcap" self.outer_mac_src = "00:00:10:00:00:00" self.outer_mac_dst = "11:22:33:44:55:66" self.outer_vlan = "N/A" @@ -109,7 +108,7 @@ class VxlanTestConfig(object): def create_pcap(self): """ - Create pcap file and copy it to tester if configured + Create pcap file and copy it to TG if configured Return scapy packet object for later usage """ if self.inner_l4_type == "SCTP": @@ -203,7 +202,7 @@ class VxlanTestConfig(object): if pkt is None: pkt = rdpcap(self.pcap_file) else: - pkt = pkt.pktgen.pkt + pkt = pkt.scapy_pkt_util.pkt time.sleep(1) if 
pkt[0].guess_payload_class(pkt[0]).name == "802.1Q": @@ -245,10 +244,10 @@ class VxlanTestConfig(object): """ Send vxlan pcap file by iface """ - del self.pkt_obj.pktgen.pkts[:] - self.pkt_obj.pktgen.assign_pkt(self.pkt) - self.pkt_obj.pktgen.update_pkts() - self.pkt_obj.send_pkt(crb=self.test_case.tester, tx_port=iface) + del self.scapy_pkt_builder.scapy_pkt_util.pkts[:] + self.scapy_pkt_builder.scapy_pkt_util.assign_pkt(self.pkt) + self.scapy_pkt_builder.scapy_pkt_util.update_pkts() + self.scapy_pkt_builder.send_pkt(node=self.test_case.tg_node, tx_port=iface) def pcap_len(self): """ @@ -281,37 +280,37 @@ class TestVxlan(TestCase): else: self.verify(False, "%s not support this vxlan" % self.nic) # Based on h/w type, choose how many ports to use - ports = self.dut.get_ports() + ports = self.sut_node.get_ports() # Verify that enough ports are available self.verify(len(ports) >= 2, "Insufficient ports for testing") global valports - valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] + valports = [_ for _ in ports if self.tg_node.get_local_port(_) != -1] self.portMask = utils.create_mask(valports[:2]) # Verify that enough threads are available - netdev = self.dut.ports_info[ports[0]]["port"] + netdev = self.sut_node.ports_info[ports[0]]["port"] self.ports_socket = netdev.socket # start testpmd - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) # init port config - self.dut_port = valports[0] - self.dut_port_mac = self.dut.get_mac_address(self.dut_port) - tester_port = self.tester.get_local_port(self.dut_port) - self.tester_iface = self.tester.get_interface(tester_port) + self.sut_port = valports[0] + self.sut_port_mac = self.sut_node.get_mac_address(self.sut_port) + tg_port = self.tg_node.get_local_port(self.sut_port) + self.tg_iface = self.tg_node.get_interface(tg_port) self.recv_port = valports[1] - tester_recv_port = self.tester.get_local_port(self.recv_port) - self.recv_iface = self.tester.get_interface(tester_recv_port) 
+ tg_recv_port = self.tg_node.get_local_port(self.recv_port) + self.recv_iface = self.tg_node.get_interface(tg_recv_port) # invalid parameter self.invalid_mac = "00:00:00:00:01" self.invalid_ip = "192.168.1.256" self.invalid_vlan = 4097 self.invalid_queue = 64 - self.path = self.dut.apps_name["test-pmd"] + self.path = self.sut_node.apps_name["test-pmd"] # vxlan payload length for performance test # inner packet not contain crc, should need add four @@ -473,7 +472,7 @@ class TestVxlan(TestCase): }, ] - self.pktgen_helper = PacketGeneratorHelper() + self.pktgen_helper = TrafficGeneratorStream() def set_fields(self): fields_config = { @@ -486,38 +485,38 @@ class TestVxlan(TestCase): def suite_measure_throughput(self, tgen_input, use_vm=False): vm_config = self.set_fields() - self.tester.pktgen.clear_streams() + self.tg_node.perf_tg.clear_streams() streams = self.pktgen_helper.prepare_stream_from_tginput( - tgen_input, 100, vm_config if use_vm else None, self.tester.pktgen + tgen_input, 100, vm_config if use_vm else None, self.tg_node.perf_tg ) - result = self.tester.pktgen.measure_throughput(stream_ids=streams) + result = self.tg_node.perf_tg.measure_throughput(stream_ids=streams) return result def perf_tunnel_filter_set_rule(self, rule_config): rule_list = { # check inner mac + inner vlan filter can work - "imac-ivlan": f'flow create {rule_config.get("dut_port")} ingress pattern eth / ' + "imac-ivlan": f'flow create {rule_config.get("sut_port")} ingress pattern eth / ' f'ipv4 / udp / vxlan / eth dst is {rule_config.get("inner_mac_dst")} / ' f'vlan tci is {rule_config.get("inner_vlan")} / end actions pf / ' f'queue index {rule_config.get("queue")} / end', # check inner mac + inner vlan + tunnel id filter can work - "imac-ivlan-tenid": f'flow create {rule_config.get("dut_port")} ingress pattern eth / ' + "imac-ivlan-tenid": f'flow create {rule_config.get("sut_port")} ingress pattern eth / ' f'ipv4 / udp / vxlan vni is {rule_config.get("vni")} / ' f'eth dst is 
{rule_config.get("inner_mac_dst")} / ' f'vlan tci is {rule_config.get("inner_vlan")} / ' f'end actions pf / queue index {rule_config.get("queue")} / end', # check inner mac + tunnel id filter can work - "imac-tenid": f'flow create {rule_config.get("dut_port")} ingress pattern eth / ' + "imac-tenid": f'flow create {rule_config.get("sut_port")} ingress pattern eth / ' f'ipv4 / udp / vxlan vni is {rule_config.get("vni")} / ' f'eth dst is {rule_config.get("inner_mac_dst")} / end actions pf / ' f'queue index {rule_config.get("queue")} / end', # check inner mac filter can work - "imac": f'flow create {rule_config.get("dut_port")} ingress pattern eth / ' + "imac": f'flow create {rule_config.get("sut_port")} ingress pattern eth / ' f'ipv4 / udp / vxlan / eth dst is {rule_config.get("inner_mac_dst")} / end actions pf / ' f'queue index {rule_config.get("queue")} / end', # check outer mac + inner mac + tunnel id filter can work - "omac-imac-tenid": f'flow create {rule_config.get("dut_port")} ingress pattern ' + "omac-imac-tenid": f'flow create {rule_config.get("sut_port")} ingress pattern ' f'eth dst is {rule_config.get("outer_mac_dst")} / ' f'ipv4 / udp / vxlan vni is {rule_config.get("vni")} / ' f'eth dst is {rule_config.get("inner_mac_dst")} / ' @@ -528,7 +527,7 @@ class TestVxlan(TestCase): msg = "not support format" self.logger.error(msg) return - out = self.dut.send_expect(rule, "testpmd>", 3) + out = self.sut_node.send_expect(rule, "testpmd>", 3) pat = "Flow rule #\d+ created" self.verify(re.findall(pat, out, re.M), "Flow rule create failed") @@ -546,13 +545,13 @@ class TestVxlan(TestCase): config = VxlanTestConfig(self, **kwargs) # now cloud filter will default enable L2 mac filter, so dst mac must # be same - config.outer_mac_dst = self.dut_port_mac + config.outer_mac_dst = self.sut_port_mac config.create_pcap() - self.dut.send_expect("start", "testpmd>", 10) - self.pmdout.wait_link_status_up(self.dut_port) - config.send_pcap(self.tester_iface) + 
self.sut_node.send_expect("start", "testpmd>", 10) + self.pmdout.wait_link_status_up(self.sut_port) + config.send_pcap(self.tg_iface) # check whether detect vxlan type - out = self.dut.get_session_output(timeout=2) + out = self.sut_node.get_session_output(timeout=2) print(out) self.verify(config.packet_type() in out, "Vxlan Packet not detected") @@ -572,13 +571,13 @@ class TestVxlan(TestCase): # if packet outer L3 is ipv6, should not enable hardware checksum if outer_ipv6: - self.csum_set_sw("outer-ip", self.dut_port) + self.csum_set_sw("outer-ip", self.sut_port) self.csum_set_sw("outer-ip", self.recv_port) config = VxlanTestConfig(self, **args) # now cloud filter will default enable L2 mac filter, so dst mac must # be same - config.outer_mac_dst = self.dut_port_mac + config.outer_mac_dst = self.sut_port_mac # csum function will not auto add outer ip src address already, so update send packet src ip address if config.outer_ip6_src != "N/A": config.outer_ip6_src = config.outer_ip6_src @@ -604,23 +603,23 @@ class TestVxlan(TestCase): self.logger.info("vxlan packet %s" % arg_str) - out = self.dut.send_expect("start", "testpmd>", 10) + out = self.sut_node.send_expect("start", "testpmd>", 10) # create pcap file with supplied arguments config = VxlanTestConfig(self, **kwargs) - config.outer_mac_dst = self.dut_port_mac + config.outer_mac_dst = self.sut_port_mac config.create_pcap() # save the capture packet into pcap format - inst = self.tester.tcpdump_sniff_packets(self.recv_iface) - config.send_pcap(self.tester_iface) - pkt = self.tester.load_tcpdump_sniff_packets(inst, timeout=3) + inst = self.tg_node.tcpdump_sniff_packets(self.recv_iface) + config.send_pcap(self.tg_iface) + pkt = self.tg_node.load_tcpdump_sniff_packets(inst, timeout=3) # extract the checksum offload from saved pcap file chksums = config.get_chksums(pkt=pkt) self.logger.info("chksums" + str(chksums)) - out = self.dut.send_expect("stop", "testpmd>", 10) + out = self.sut_node.send_expect("stop", 
"testpmd>", 10) print(out) # verify detected l4 invalid checksum @@ -654,10 +653,10 @@ class TestVxlan(TestCase): # send vxlan packet config.create_pcap() - self.dut.send_expect("start", "testpmd>", 10) - self.pmdout.wait_link_status_up(self.dut_port) - config.send_pcap(self.tester_iface) - out = self.dut.get_session_output(timeout=2) + self.sut_node.send_expect("start", "testpmd>", 10) + self.pmdout.wait_link_status_up(self.sut_port) + config.send_pcap(self.tg_iface) + out = self.sut_node.get_session_output(timeout=2) print(out) queue = -1 @@ -670,9 +669,9 @@ class TestVxlan(TestCase): self.verify(queue_id == int(queue), "invalid receive queue") # del rule - args = [self.dut_port] + args = [self.sut_port] self.tunnel_filter_del(*args) - self.dut.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("stop", "testpmd>", 10) def test_vxlan_ipv4_detect(self): """ @@ -681,22 +680,22 @@ class TestVxlan(TestCase): if self.nic in ["ICE_25G-E810C_SFP", "ICE_100G-E810C_QSFP"]: print("Intel® Ethernet 700 Series support default none VECTOR") src_vec_model = "n" - self.eal_para = self.dut.create_eal_parameters( + self.eal_para = self.sut_node.create_eal_parameters( cores="1S/5C/1T", socket=self.ports_socket ) - self.dut.send_expect( + self.sut_node.send_expect( r"./%s %s -- -i --disable-rss --rxq=4 --txq=4 --nb-cores=4 --portmask=%s" % (self.path, self.eal_para, self.portMask), "testpmd>", 30, ) - self.dut.send_expect("set fwd rxonly", "testpmd>", 10) - self.dut.send_expect("set verbose 1", "testpmd>", 10) - self.enable_vxlan(self.dut_port) + self.sut_node.send_expect("set fwd rxonly", "testpmd>", 10) + self.sut_node.send_expect("set verbose 1", "testpmd>", 10) + self.enable_vxlan(self.sut_port) self.enable_vxlan(self.recv_port) - self.pmdout.wait_link_status_up(self.dut_port) + self.pmdout.wait_link_status_up(self.sut_port) # check normal packet self.send_and_detect(outer_udp_dst=1234) # check vxlan + UDP inner packet @@ -710,8 +709,8 @@ class TestVxlan(TestCase): # 
check vlan vxlan + vlan inner packet self.send_and_detect(outer_vlan=1, inner_vlan=1) - out = self.dut.send_expect("stop", "testpmd>", 10) - self.dut.send_expect("quit", "#", 10) + out = self.sut_node.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("quit", "#", 10) def test_vxlan_ipv6_detect(self): """ @@ -721,22 +720,22 @@ class TestVxlan(TestCase): print("Intel® Ethernet 700 Series support default none VECTOR") src_vec_model = "n" - self.eal_para = self.dut.create_eal_parameters( + self.eal_para = self.sut_node.create_eal_parameters( cores="1S/5C/1T", socket=self.ports_socket ) - self.dut.send_expect( + self.sut_node.send_expect( r"./%s %s -- -i --disable-rss --rxq=4 --txq=4 --nb-cores=4 --portmask=%s" % (self.path, self.eal_para, self.portMask), "testpmd>", 30, ) - self.dut.send_expect("set fwd rxonly", "testpmd>", 10) - self.dut.send_expect("set verbose 1", "testpmd>", 10) - self.enable_vxlan(self.dut_port) + self.sut_node.send_expect("set fwd rxonly", "testpmd>", 10) + self.sut_node.send_expect("set verbose 1", "testpmd>", 10) + self.enable_vxlan(self.sut_port) self.enable_vxlan(self.recv_port) - self.pmdout.wait_link_status_up(self.dut_port) + self.pmdout.wait_link_status_up(self.sut_port) # check normal ipv6 packet self.send_and_detect( outer_ip6_src="FE80:0:0:0:0:0:0:0", @@ -762,8 +761,8 @@ class TestVxlan(TestCase): inner_l4_type="SCTP", ) - out = self.dut.send_expect("stop", "testpmd>", 10) - self.dut.send_expect("quit", "#", 10) + out = self.sut_node.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("quit", "#", 10) def test_vxlan_ipv4_checksum_offload(self): """ @@ -771,11 +770,11 @@ class TestVxlan(TestCase): """ # start testpmd with 2queue/1port - self.eal_para = self.dut.create_eal_parameters( + self.eal_para = self.sut_node.create_eal_parameters( cores="1S/5C/1T", socket=self.ports_socket ) - self.dut.send_expect( + self.sut_node.send_expect( r"./%s %s -- -i --portmask=%s --enable-rx-cksum" % (self.path, self.eal_para, 
self.portMask), "testpmd>", @@ -784,21 +783,21 @@ class TestVxlan(TestCase): self.iperr_num = 0 # disable vlan filter - self.dut.send_expect("vlan set filter off %d" % self.dut_port, "testpmd") + self.sut_node.send_expect("vlan set filter off %d" % self.sut_port, "testpmd") # enable tx checksum offload - self.dut.send_expect("set fwd csum", "testpmd>", 10) - self.dut.send_expect("port stop all", "testpmd>") + self.sut_node.send_expect("set fwd csum", "testpmd>", 10) + self.sut_node.send_expect("port stop all", "testpmd>") self.csum_set_type("ip", self.recv_port) self.csum_set_type("outer-ip", self.recv_port) self.csum_set_type("udp", self.recv_port) self.csum_set_type("tcp", self.recv_port) self.csum_set_type("sctp", self.recv_port) - self.dut.send_expect("port start all", "testpmd>") - self.dut.send_expect("csum parse-tunnel on %d" % self.recv_port, "testpmd>", 10) + self.sut_node.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("csum parse-tunnel on %d" % self.recv_port, "testpmd>", 10) - self.enable_vxlan(self.dut_port) + self.enable_vxlan(self.sut_port) self.enable_vxlan(self.recv_port) - self.pmdout.wait_link_status_up(self.dut_port) + self.pmdout.wait_link_status_up(self.sut_port) # check normal packet + ip checksum invalid self.send_and_check(outer_ip_invalid=1, outer_udp_dst=1234) # check vxlan packet + inner ip checksum invalid @@ -835,7 +834,7 @@ class TestVxlan(TestCase): # check vlan vxlan packet + inner vlan + inner sctp checksum invalid self.send_and_check(outer_vlan=1, inner_l4_invalid=1, inner_l4_type="SCTP") - self.dut.send_expect("quit", "#", 10) + self.sut_node.send_expect("quit", "#", 10) def test_vxlan_ipv6_checksum_offload(self): """ @@ -844,11 +843,11 @@ class TestVxlan(TestCase): """ # start testpmd with 2queue/1port - self.eal_para = self.dut.create_eal_parameters( + self.eal_para = self.sut_node.create_eal_parameters( cores="1S/5C/1T", socket=self.ports_socket ) - self.dut.send_expect( + self.sut_node.send_expect( 
r"./%s %s -- -i --portmask=%s --enable-rx-cksum" % (self.path, self.eal_para, self.portMask), "testpmd>", @@ -857,19 +856,19 @@ class TestVxlan(TestCase): self.iperr_num = 0 # disable vlan filter - self.dut.send_expect("vlan set filter off %d" % self.dut_port, "testpmd") + self.sut_node.send_expect("vlan set filter off %d" % self.sut_port, "testpmd") # enable tx checksum offload - self.dut.send_expect("set fwd csum", "testpmd>", 10) + self.sut_node.send_expect("set fwd csum", "testpmd>", 10) self.csum_set_type("outer-ip", self.recv_port) self.csum_set_type("udp", self.recv_port) self.csum_set_type("outer-udp", self.recv_port) self.csum_set_type("tcp", self.recv_port) self.csum_set_type("sctp", self.recv_port) - self.dut.send_expect("csum parse-tunnel on %d" % self.recv_port, "testpmd>", 10) + self.sut_node.send_expect("csum parse-tunnel on %d" % self.recv_port, "testpmd>", 10) - self.enable_vxlan(self.dut_port) + self.enable_vxlan(self.sut_port) self.enable_vxlan(self.recv_port) - self.pmdout.wait_link_status_up(self.dut_port) + self.pmdout.wait_link_status_up(self.sut_port) # check normal ipv6 packet self.send_and_check( outer_ip6_src="FE80:0:0:0:0:0:0:0", outer_ip6_dst="FE80:0:0:0:0:0:0:1" @@ -942,39 +941,39 @@ class TestVxlan(TestCase): inner_vlan=1, ) - self.dut.send_expect("quit", "#", 10) + self.sut_node.send_expect("quit", "#", 10) def test_tunnel_filter(self): """ verify tunnel filter feature """ - self.eal_para = self.dut.create_eal_parameters( + self.eal_para = self.sut_node.create_eal_parameters( cores="1S/5C/1T", socket=self.ports_socket ) - self.dut.send_expect( + self.sut_node.send_expect( r"./%s %s -- -i --disable-rss --rxq=%d --txq=%d --nb-cores=4 --portmask=%s" % (self.path, self.eal_para, MAX_TXQ_RXQ, MAX_TXQ_RXQ, self.portMask), "testpmd>", 30, ) - self.dut.send_expect("set fwd rxonly", "testpmd>", 10) - self.dut.send_expect("set verbose 1", "testpmd>", 10) - self.enable_vxlan(self.dut_port) + self.sut_node.send_expect("set fwd rxonly", 
"testpmd>", 10) + self.sut_node.send_expect("set verbose 1", "testpmd>", 10) + self.enable_vxlan(self.sut_port) self.enable_vxlan(self.recv_port) - self.pmdout.wait_link_status_up(self.dut_port) + self.pmdout.wait_link_status_up(self.sut_port) config = VxlanTestConfig(self) config_vlan = VxlanTestConfig(self, inner_vlan=1) - config.outer_mac_dst = self.dut_port_mac - config_vlan.outer_mac_dst = self.dut_port_mac + config.outer_mac_dst = self.sut_port_mac + config_vlan.outer_mac_dst = self.sut_port_mac expect_queue = randint(1, MAX_TXQ_RXQ - 1) rule_list = [ # check inner mac + inner vlan filter can work "flow create {} ingress pattern eth / ipv4 / udp / vxlan / eth dst is {} / vlan tci is {} / end actions pf " "/ queue index {} / end".format( - self.dut_port, + self.sut_port, config_vlan.inner_mac_dst, config_vlan.inner_vlan, expect_queue, @@ -982,7 +981,7 @@ class TestVxlan(TestCase): # check inner mac + inner vlan + tunnel id filter can work "flow create {} ingress pattern eth / ipv4 / udp / vxlan vni is {} / eth dst is {} " "/ vlan tci is {} / end actions pf / queue index {} / end".format( - self.dut_port, + self.sut_port, config_vlan.vni, config_vlan.inner_mac_dst, config_vlan.inner_vlan, @@ -991,15 +990,15 @@ class TestVxlan(TestCase): # check inner mac + tunnel id filter can work "flow create {} ingress pattern eth / ipv4 / udp / vxlan vni is {} / eth dst is {} / end actions pf " "/ queue index {} / end".format( - self.dut_port, config.vni, config.inner_mac_dst, expect_queue + self.sut_port, config.vni, config.inner_mac_dst, expect_queue ), # check inner mac filter can work "flow create {} ingress pattern eth / ipv4 / udp / vxlan / eth dst is {} / end actions pf / queue index {} " - "/ end".format(self.dut_port, config.inner_mac_dst, expect_queue), + "/ end".format(self.sut_port, config.inner_mac_dst, expect_queue), # check outer mac + inner mac + tunnel id filter can work "flow create {} ingress pattern eth dst is {} / ipv4 / udp / vxlan vni is {} / eth dst 
is {} " "/ end actions pf / queue index {} / end".format( - self.dut_port, + self.sut_port, config.outer_mac_dst, config.vni, config.inner_mac_dst, @@ -1007,7 +1006,7 @@ class TestVxlan(TestCase): ) # iip not supported by now # 'flow create {} ingress pattern eth / ipv4 / udp / vxlan / eth / ipv4 dst is {} / end actions pf ' - # '/ queue index {} / end'.format(self.dut_port, + # '/ queue index {} / end'.format(self.sut_port, # config.inner_ip_dst, # queue) ] @@ -1018,7 +1017,7 @@ class TestVxlan(TestCase): else: self.filter_and_check(rule, config, expect_queue) - self.dut.send_expect("quit", "#", 10) + self.sut_node.send_expect("quit", "#", 10) def test_tunnel_filter_invalid(self): """ @@ -1028,26 +1027,26 @@ class TestVxlan(TestCase): queue_id = 3 config = VxlanTestConfig(self) - config.outer_mac_dst = self.dut_port_mac + config.outer_mac_dst = self.sut_port_mac - self.eal_para = self.dut.create_eal_parameters( + self.eal_para = self.sut_node.create_eal_parameters( cores="1S/5C/1T", socket=self.ports_socket ) - self.dut.send_expect( + self.sut_node.send_expect( r"./%s %s -- -i --disable-rss --rxq=4 --txq=4 --nb-cores=4 --portmask=%s" % (self.path, self.eal_para, self.portMask), "testpmd>", 30, ) - self.enable_vxlan(self.dut_port) + self.enable_vxlan(self.sut_port) self.enable_vxlan(self.recv_port) - self.pmdout.wait_link_status_up(self.dut_port) + self.pmdout.wait_link_status_up(self.sut_port) rule = ( "flow create {} ingress pattern eth / ipv4 / udp / vxlan vni is {} / eth dst is {} / end actions pf " "/ queue index {} / end".format( - self.dut_port, config.vni, self.invalid_mac, queue_id + self.sut_port, config.vni, self.invalid_mac, queue_id ) ) out = self.tunnel_filter_add_nocheck(rule) @@ -1056,7 +1055,7 @@ class TestVxlan(TestCase): rule = ( "flow create {} ingress pattern eth / ipv4 / udp / vxlan vni is {} / eth / ipv4 dst is {} " "/ end actions pf / queue index {} / end".format( - self.dut_port, config.vni, self.invalid_ip, queue_id + self.sut_port, 
config.vni, self.invalid_ip, queue_id ) ) out = self.tunnel_filter_add_nocheck(rule) @@ -1064,7 +1063,7 @@ class TestVxlan(TestCase): # testpmd is not support # rule = 'flow create {} ingress pattern eth / ipv4 / udp / vxlan vni is {} / eth dst is {} / vlan vid is {} ' \ - # '/ end actions pf / queue index {} / end'.format(self.dut_port, + # '/ end actions pf / queue index {} / end'.format(self.sut_port, # config.vni, # config.inner_mac_dst, # self.invalid_vlan, @@ -1075,20 +1074,20 @@ class TestVxlan(TestCase): rule = ( "flow create {} ingress pattern eth / ipv4 / udp / vxlan vni is {} / eth dst is {} / end actions pf " "/ queue index {} / end".format( - self.dut_port, config.vni, config.inner_mac_dst, self.invalid_queue + self.sut_port, config.vni, config.inner_mac_dst, self.invalid_queue ) ) out = self.tunnel_filter_add_nocheck(rule) self.verify("Invalid queue ID" in out, "Failed to detect invalid queue") - self.dut.send_expect("stop", "testpmd>", 10) - self.dut.send_expect("quit", "#", 10) + self.sut_node.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("quit", "#", 10) - def config_tunnelfilter(self, dut_port, recv_port, perf_config, pcapfile): + def config_tunnelfilter(self, sut_port, recv_port, perf_config, pcapfile): pkts = [] config = VxlanTestConfig(self, payload_size=self.vxlan_payload - 4) config.inner_vlan = self.default_vlan - config.outer_mac_dst = self.dut.get_mac_address(dut_port) + config.outer_mac_dst = self.sut_node.get_mac_address(sut_port) config.pcap_file = pcapfile tun_filter = perf_config["tunnel_filter"] @@ -1097,11 +1096,11 @@ class TestVxlan(TestCase): if tun_filter == "None" and recv_queue == "Multi": print((utils.RED("RSS and Tunel filter can't enable in the same time"))) else: - self.enable_vxlan(dut_port) + self.enable_vxlan(sut_port) if tun_filter != "None": rule_config = { - "dut_port": dut_port, + "sut_port": sut_port, "outer_mac_dst": config.outer_mac_dst, "inner_mac_dst": config.inner_mac_dst, "inner_ip_dst": 
config.inner_ip_dst, @@ -1112,9 +1111,9 @@ class TestVxlan(TestCase): } self.perf_tunnel_filter_set_rule(rule_config) - if perf_config["Packet"] == "Normal": + if perf_config["ScapyTrafficGenerator"] == "Normal": config.outer_udp_dst = 63 - config.outer_mac_dst = self.dut.get_mac_address(dut_port) + config.outer_mac_dst = self.sut_node.get_mac_address(sut_port) config.payload_size = ( PACKET_LEN - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] - HEADER_SIZE["udp"] ) @@ -1138,7 +1137,7 @@ class TestVxlan(TestCase): pkts.append(pkt) rule_config = { - "dut_port": dut_port, + "sut_port": sut_port, "outer_mac_dst": config.outer_mac_dst, "inner_mac_dst": config.inner_mac_dst, "inner_ip_dst": config.inner_ip_dst, @@ -1151,7 +1150,7 @@ class TestVxlan(TestCase): # save pkt list into pcap file wrpcap(config.pcap_file, pkts) - self.tester.session.copy_file_to(config.pcap_file) + self.tg_node.session.copy_file_to(config.pcap_file) def combine_pcap(self, dest_pcap, src_pcap): pkts = rdpcap(dest_pcap) @@ -1165,7 +1164,7 @@ class TestVxlan(TestCase): def test_perf_vxlan_tunnelfilter_performance_2ports(self): self.result_table_create(self.tunnel_header) - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( "1S/%dC/1T" % (self.tunnel_multiqueue * 2 + 1), socket=self.ports_socket ) @@ -1190,39 +1189,39 @@ class TestVxlan(TestCase): "./%s %s -- -i --rss-udp --rxq=2 --txq=2 --nb-cores=4 --portmask=%s" ) - self.eal_para = self.dut.create_eal_parameters(cores=core_list) + self.eal_para = self.sut_node.create_eal_parameters(cores=core_list) pmd_cmd = pmd_temp % (self.path, self.eal_para, self.portMask) - self.dut.send_expect(pmd_cmd, "testpmd> ", 100) + self.sut_node.send_expect(pmd_cmd, "testpmd> ", 100) # config flow self.config_tunnelfilter( - self.dut_port, self.recv_port, perf_config, "flow1.pcap" + self.sut_port, self.recv_port, perf_config, "flow1.pcap" ) # config the flows tgen_input = [] tgen_input.append( ( - self.tester.get_local_port(self.dut_port), - 
self.tester.get_local_port(self.recv_port), + self.tg_node.get_local_port(self.sut_port), + self.tg_node.get_local_port(self.recv_port), "flow1.pcap", ) ) if BIDIRECT: self.config_tunnelfilter( - self.recv_port, self.dut_port, perf_config, "flow2.pcap" + self.recv_port, self.sut_port, perf_config, "flow2.pcap" ) tgen_input.append( ( - self.tester.get_local_port(self.recv_port), - self.tester.get_local_port(self.dut_port), + self.tg_node.get_local_port(self.recv_port), + self.tg_node.get_local_port(self.sut_port), "flow2.pcap", ) ) - self.dut.send_expect("set fwd io", "testpmd>", 10) - self.dut.send_expect("start", "testpmd>", 10) - self.pmdout.wait_link_status_up(self.dut_port) + self.sut_node.send_expect("set fwd io", "testpmd>", 10) + self.sut_node.send_expect("start", "testpmd>", 10) + self.pmdout.wait_link_status_up(self.sut_port) if BIDIRECT: wirespeed = self.wirespeed(self.nic, PACKET_LEN, 2) else: @@ -1236,8 +1235,8 @@ class TestVxlan(TestCase): perf_config["Mpps"] = pps perf_config["pct"] = pps * 100 / wirespeed - out = self.dut.send_expect("stop", "testpmd>", 10) - self.dut.send_expect("quit", "# ", 10) + out = self.sut_node.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("quit", "# ", 10) # verify every queue work fine check_queue = 0 @@ -1263,26 +1262,26 @@ class TestVxlan(TestCase): def test_perf_vxlan_checksum_performance_2ports(self): self.result_table_create(self.chksum_header) vxlan = VxlanTestConfig(self, payload_size=self.vxlan_payload) - vxlan.outer_mac_dst = self.dut.get_mac_address(self.dut_port) + vxlan.outer_mac_dst = self.sut_node.get_mac_address(self.sut_port) vxlan.pcap_file = "vxlan1.pcap" vxlan.inner_mac_dst = "00:00:20:00:00:01" vxlan.create_pcap() vxlan_queue = VxlanTestConfig(self, payload_size=self.vxlan_payload) - vxlan_queue.outer_mac_dst = self.dut.get_mac_address(self.dut_port) + vxlan_queue.outer_mac_dst = self.sut_node.get_mac_address(self.sut_port) vxlan_queue.pcap_file = "vxlan1_1.pcap" 
vxlan_queue.inner_mac_dst = "00:00:20:00:00:02" vxlan_queue.create_pcap() # socket/core/thread - core_list = self.dut.get_core_list( + core_list = self.sut_node.get_core_list( "1S/%dC/1T" % (self.tunnel_multiqueue * 2 + 1), socket=self.ports_socket ) core_mask = utils.create_mask(core_list) - self.dut_ports = self.dut.get_ports_performance(force_different_nic=False) - tx_port = self.tester.get_local_port(self.dut_ports[0]) - rx_port = self.tester.get_local_port(self.dut_ports[1]) + self.sut_ports = self.sut_node.get_ports_performance(force_different_nic=False) + tx_port = self.tg_node.get_local_port(self.sut_ports[0]) + rx_port = self.tg_node.get_local_port(self.sut_ports[1]) for cal in self.cal_type: recv_queue = cal["recvqueue"] @@ -1309,18 +1308,18 @@ class TestVxlan(TestCase): else: pmd_temp = "./%s %s -- -i --nb-cores=2 --portmask=%s" - self.eal_para = self.dut.create_eal_parameters(cores=core_list) + self.eal_para = self.sut_node.create_eal_parameters(cores=core_list) pmd_cmd = pmd_temp % (self.path, self.eal_para, self.portMask) - self.dut.send_expect(pmd_cmd, "testpmd> ", 100) - self.dut.send_expect("set fwd csum", "testpmd>", 10) - self.enable_vxlan(self.dut_port) + self.sut_node.send_expect(pmd_cmd, "testpmd> ", 100) + self.sut_node.send_expect("set fwd csum", "testpmd>", 10) + self.enable_vxlan(self.sut_port) self.enable_vxlan(self.recv_port) - self.pmdout.wait_link_status_up(self.dut_port) + self.pmdout.wait_link_status_up(self.sut_port) # redirect flow to another queue by tunnel filter rule_config = { - "dut_port": self.dut_port, + "sut_port": self.sut_port, "outer_mac_dst": vxlan.outer_mac_dst, "inner_mac_dst": vxlan.inner_mac_dst, "inner_ip_dst": vxlan.inner_ip_dst, @@ -1333,7 +1332,7 @@ class TestVxlan(TestCase): if recv_queue == "Multi": rule_config = { - "dut_port": self.dut_port, + "sut_port": self.sut_port, "outer_mac_dst": vxlan_queue.outer_mac_dst, "inner_mac_dst": vxlan_queue.inner_mac_dst, "inner_ip_dst": vxlan_queue.inner_ip_dst, @@ 
-1345,10 +1344,10 @@ class TestVxlan(TestCase): self.perf_tunnel_filter_set_rule(rule_config) for pro in cal["csum"]: - self.csum_set_type(pro, self.dut_port) + self.csum_set_type(pro, self.sut_port) self.csum_set_type(pro, self.recv_port) - self.dut.send_expect("start", "testpmd>", 10) + self.sut_node.send_expect("start", "testpmd>", 10) wirespeed = self.wirespeed(self.nic, PACKET_LEN, 1) @@ -1359,8 +1358,8 @@ class TestVxlan(TestCase): cal["Mpps"] = pps cal["pct"] = pps * 100 / wirespeed - out = self.dut.send_expect("stop", "testpmd>", 10) - self.dut.send_expect("quit", "# ", 10) + out = self.sut_node.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("quit", "# ", 10) # verify every queue work fine check_queue = 1 @@ -1377,35 +1376,35 @@ class TestVxlan(TestCase): self.result_table_print() def enable_vxlan(self, port): - self.dut.send_expect( + self.sut_node.send_expect( "rx_vxlan_port add %d %d" % (VXLAN_PORT, port), "testpmd>", 10 ) def csum_set_type(self, proto, port): - self.dut.send_expect("port stop all", "testpmd>") - out = self.dut.send_expect("csum set %s hw %d" % (proto, port), "testpmd>", 10) - self.dut.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("port stop all", "testpmd>") + out = self.sut_node.send_expect("csum set %s hw %d" % (proto, port), "testpmd>", 10) + self.sut_node.send_expect("port start all", "testpmd>") self.verify("Bad arguments" not in out, "Failed to set vxlan csum") self.verify("error" not in out, "Failed to set vxlan csum") def csum_set_sw(self, proto, port): - self.dut.send_expect("port stop all", "testpmd>") - out = self.dut.send_expect("csum set %s sw %d" % (proto, port), "testpmd>", 10) - self.dut.send_expect("port start all", "testpmd>") + self.sut_node.send_expect("port stop all", "testpmd>") + out = self.sut_node.send_expect("csum set %s sw %d" % (proto, port), "testpmd>", 10) + self.sut_node.send_expect("port start all", "testpmd>") self.verify("Bad arguments" not in out, "Failed to set 
vxlan csum") self.verify("error" not in out, "Failed to set vxlan csum") def tunnel_filter_add(self, rule): - out = self.dut.send_expect(rule, "testpmd>", 3) + out = self.sut_node.send_expect(rule, "testpmd>", 3) self.verify("Flow rule #0 created" in out, "Flow rule create failed") return out def tunnel_filter_add_nocheck(self, rule): - out = self.dut.send_expect(rule, "testpmd>", 3) + out = self.sut_node.send_expect(rule, "testpmd>", 3) return out def tunnel_filter_del(self, *args): - out = self.dut.send_expect("flow flush 0", "testpmd>", 10) + out = self.sut_node.send_expect("flow flush 0", "testpmd>", 10) return out def set_up(self): @@ -1418,7 +1417,7 @@ class TestVxlan(TestCase): """ Run after each test case. """ - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/TestSuite_vxlan_gpe_support_in_i40e.py b/tests/TestSuite_vxlan_gpe_support_in_i40e.py index bb854b70..b4619e1d 100644 --- a/tests/TestSuite_vxlan_gpe_support_in_i40e.py +++ b/tests/TestSuite_vxlan_gpe_support_in_i40e.py @@ -62,7 +62,7 @@ class VxlanGpeTestConfig(object): def create_pcap(self): """ - Create pcap file and copy it to tester + Create pcap file and copy it to TG """ self.inner_payload = "X" * self.payload_size @@ -95,9 +95,9 @@ class VxlanGpeTestConfig(object): """ Send vxlan pcap file by iface """ - self.test_case.tester.scapy_append('pcap = rdpcap("%s")' % self.pcap_file) - self.test_case.tester.scapy_append('sendp(pcap, iface="%s")' % iface) - self.test_case.tester.scapy_execute() + self.test_case.tg_node.scapy_append('pcap = rdpcap("%s")' % self.pcap_file) + self.test_case.tg_node.scapy_append('sendp(pcap, iface="%s")' % iface) + self.test_case.tg_node.scapy_execute() class TestVxlanGpeSupportInI40e(TestCase): @@ -115,30 +115,30 @@ class TestVxlanGpeSupportInI40e(TestCase): ]: self.verify(False, "%s not support this vxlan-gpe" % self.nic) # Based on h/w type, choose how many ports to use - ports = self.dut.get_ports() + ports = 
self.sut_node.get_ports() # Verify that enough ports are available self.verify(len(ports) >= 2, "Insufficient ports for testing") global valports - valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] + valports = [_ for _ in ports if self.tg_node.get_local_port(_) != -1] self.portMask = utils.create_mask(valports[:2]) # Verify that enough threads are available - netdev = self.dut.ports_info[ports[0]]["port"] + netdev = self.sut_node.ports_info[ports[0]]["port"] self.ports_socket = netdev.socket - cores = self.dut.get_core_list("all", socket=self.ports_socket) + cores = self.sut_node.get_core_list("all", socket=self.ports_socket) self.verify(cores is not None, "Insufficient cores for speed testing") self.coremask = utils.create_mask(cores) # start testpmd - self.pmdout = PmdOutput(self.dut) + self.pmdout = PmdOutput(self.sut_node) # init port config - self.dut_port = valports[0] - self.dut_port_mac = self.dut.get_mac_address(self.dut_port) - tester_port = self.tester.get_local_port(self.dut_port) - self.tester_iface = self.tester.get_interface(tester_port) + self.sut_port = valports[0] + self.sut_port_mac = self.sut_node.get_mac_address(self.sut_port) + tg_port = self.tg_node.get_local_port(self.sut_port) + self.tg_iface = self.tg_node.get_interface(tg_port) def set_up(self): """ @@ -161,10 +161,10 @@ class TestVxlanGpeSupportInI40e(TestCase): # now cloud filter will default enable L2 mac filter, so dst mac must # be same - config.outer_mac_dst = self.dut_port_mac + config.outer_mac_dst = self.sut_port_mac args = [ - self.dut_port, + self.sut_port, config.outer_mac_dst, config.inner_mac_dst, config.inner_ip_dst, @@ -180,7 +180,7 @@ class TestVxlanGpeSupportInI40e(TestCase): if remove is True: queue_id = 0 args = [ - self.dut_port, + self.sut_port, config.outer_mac_dst, config.inner_mac_dst, config.inner_ip_dst, @@ -193,9 +193,9 @@ class TestVxlanGpeSupportInI40e(TestCase): # send vxlan packet config.create_pcap() - self.dut.send_expect("start", 
"testpmd>", 10) - config.send_pcap(self.tester_iface) - out = self.dut.get_session_output(timeout=2) + self.sut_node.send_expect("start", "testpmd>", 10) + config.send_pcap(self.tg_iface) + out = self.sut_node.get_session_output(timeout=2) queue = -1 pattern = re.compile("- Receive queue=0x(\d)") @@ -206,7 +206,7 @@ class TestVxlanGpeSupportInI40e(TestCase): # verify received in expected queue self.verify(queue_id == int(queue), "invalid receive queue") - self.dut.send_expect("stop", "testpmd>", 10) + self.sut_node.send_expect("stop", "testpmd>", 10) def test_vxlan_gpe_ipv4_detect(self): self.pmdout.start_testpmd("all") @@ -223,15 +223,15 @@ class TestVxlanGpeSupportInI40e(TestCase): 'sendp([Ether(dst="%s")/IP(src="18.0.0.1")/UDP(dport=%d, sport=43)/' % (mac, VXLAN_GPE_PORT) + 'VXLAN(flags=12)/IP(src="10.0.0.1")], iface="%s", count=1)' - % self.tester_iface + % self.tg_iface ) cwd = os.getcwd() dir_vxlan_module = cwd + r"/" + FOLDERS["Depends"] - self.tester.scapy_append("sys.path.append('%s')" % dir_vxlan_module) - self.tester.scapy_append("from vxlan import VXLAN") - self.tester.scapy_append(packet) - self.tester.scapy_execute() - out = self.dut.get_session_output(timeout=5) + self.tg_node.scapy_append("sys.path.append('%s')" % dir_vxlan_module) + self.tg_node.scapy_append("from vxlan import VXLAN") + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() + out = self.sut_node.get_session_output(timeout=5) print(out) self.verify( "L3_IPV4_EXT_UNKNOWN" in out and "%s" % VXLAN_GPE_PORT in out, @@ -242,11 +242,11 @@ class TestVxlanGpeSupportInI40e(TestCase): self.pmdout.execute_cmd( "port config 0 udp_tunnel_port rm vxlan-gpe %s" % VXLAN_GPE_PORT ) - self.tester.scapy_append("sys.path.append('%s')" % dir_vxlan_module) - self.tester.scapy_append("from vxlan import VXLAN") - self.tester.scapy_append(packet) - self.tester.scapy_execute() - out = self.dut.get_session_output(timeout=5) + self.tg_node.scapy_append("sys.path.append('%s')" % dir_vxlan_module) + 
self.tg_node.scapy_append("from vxlan import VXLAN") + self.tg_node.scapy_append(packet) + self.tg_node.scapy_execute() + out = self.sut_node.get_session_output(timeout=5) print(out) self.pmdout.execute_cmd("quit", "#") self.verify( @@ -255,7 +255,7 @@ class TestVxlanGpeSupportInI40e(TestCase): ) def enable_vxlan(self, port): - self.dut.send_expect( + self.sut_node.send_expect( "rx_vxlan_port add %d %d" % (VXLAN_GPE_PORT, port), "testpmd>", 10 ) @@ -265,7 +265,7 @@ class TestVxlanGpeSupportInI40e(TestCase): # filter_type # (imac-ivlan|imac-ivlan-tenid|imac-tenid|imac|omac-imac-tenid|iip) # tenant_id queue_num - out = self.dut.send_expect( + out = self.sut_node.send_expect( "tunnel_filter add %d " % args[0] + "%s %s %s " % (args[1], args[2], args[3]) + "%d vxlan-gpe %s " % (args[4], args[5]) @@ -277,7 +277,7 @@ class TestVxlanGpeSupportInI40e(TestCase): self.verify("error" not in out, "Failed to add tunnel filter") def tunnel_filter_del(self, *args): - out = self.dut.send_expect( + out = self.sut_node.send_expect( "tunnel_filter rm %d " % args[0] + "%s %s %s " % (args[1], args[2], args[3]) + "%d vxlan-gpe %s " % (args[4], args[5]) @@ -292,7 +292,7 @@ class TestVxlanGpeSupportInI40e(TestCase): """ Run after each test case. 
""" - self.dut.kill_all() + self.sut_node.kill_all() def tear_down_all(self): """ diff --git a/tests/bonding.py b/tests/bonding.py index 30f55c7f..20c15129 100644 --- a/tests/bonding.py +++ b/tests/bonding.py @@ -14,8 +14,8 @@ from scapy.utils import wrpcap import framework.utils as utils from framework.exception import TimeoutException, VerifyFailure -from framework.packet import TMP_PATH, Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import TMP_PATH, ScapyPacketBuilder from framework.settings import HEADER_SIZE # define bonding mode @@ -43,11 +43,11 @@ class PmdBonding(object): # set parent instance self.parent = kwargs.get("parent") # set target source code directory - self.target_source = self.parent.dut.base_dir + self.target_source = self.parent.sut.base_dir # set logger self.logger = self.parent.logger self.verify = self.parent.verify - # select packet generator + # select traffic generator self.pktgen_name = "ixia" if self.is_perf else "scapy" # traffic default config self.default_pkt_size = kwargs.get("pkt_size") or FRAME_SIZE_64 @@ -58,11 +58,11 @@ class PmdBonding(object): self.default_dst_port = kwargs.get("dst_port") self.default_pkt_name = kwargs.get("pkt_name") # testpmd - self.testpmd = PmdOutput(self.parent.dut) + self.testpmd = PmdOutput(self.parent.sut) self.testpmd_status = "close" # - # On tester platform, packet transmission + # On TG platform, packet transmission # def mac_str_to_int(self, mac_str): """convert the MAC type from the string into the int.""" @@ -130,16 +130,16 @@ class PmdBonding(object): pktlen = frame_size - headers_size return pktlen - def set_stream_to_slave_port(self, dut_port_id): + def set_stream_to_slave_port(self, sut_port_id): """ - use framework/packet.py module to create one stream, send stream to + use framework/scapy_packet_builder.py module to create one stream, send stream to slave port """ # get dst port mac address pkt_name = self.default_pkt_name destport = 
self.default_dst_port destip = self.default_dst_ip - dst_mac = self.get_port_info(dut_port_id, "mac") + dst_mac = self.get_port_info(sut_port_id, "mac") # packet size pktlen = self.get_pkt_len(pkt_name) # set stream configuration @@ -163,17 +163,17 @@ class PmdBonding(object): savePath = os.sep.join([TMP_PATH, "pkt_{0}.pcap".format(pkt_name)]) pkt_type = pkt_config.get("type") pkt_layers = pkt_config.get("pkt_layers") - pkt = Packet(pkt_type=pkt_type.upper()) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type.upper()) for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - pkt.save_pcapfile(filename=savePath) - streams.append(pkt.pktgen.pkt) + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + scapy_pkt_builder.save_pcapfile(filename=savePath) + streams.append(scapy_pkt_builder.scapy_pkt_util.pkt) return streams def set_stream_to_bond_port(self, bond_port, slaves): """ - : use framework/packet.py module to create multiple streams + : use framework/scapy_packet_builder.py module to create multiple streams send streams from bond port to slaves :param bond_port: bonded device port id @@ -213,11 +213,11 @@ class PmdBonding(object): savePath = os.sep.join([TMP_PATH, "pkt_{0}.pcap".format(stm_name)]) pkt_type = values.get("type") pkt_layers = values.get("pkt_layers") - pkt = Packet(pkt_type=pkt_type.upper()) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type.upper()) for layer in list(pkt_layers.keys()): - pkt.config_layer(layer, pkt_layers[layer]) - pkt.save_pcapfile(filename=savePath) - streams.append(pkt.pktgen.pkt) + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + scapy_pkt_builder.save_pcapfile(filename=savePath) + streams.append(scapy_pkt_builder.scapy_pkt_util.pkt) return streams @@ -225,7 +225,7 @@ class PmdBonding(object): tx_iface = kwargs.get("port topo")[0] # set interface ready to send packet cmd = "ifconfig {0} up".format(tx_iface) - self.parent.tester.send_expect(cmd, "# ", 30) + 
self.parent.tg_node.send_expect(cmd, "# ", 30) send_pkts = kwargs.get("stream") # stream config stream_configs = kwargs.get("traffic configs") @@ -235,7 +235,7 @@ class PmdBonding(object): sendp(send_pkts, iface=tx_iface, inter=interval, verbose=False, count=count) def send_packets_by_ixia(self, **kwargs): - tester_port = kwargs.get("tx_intf") + tg_port = kwargs.get("tx_intf") count = kwargs.get("count", 1) traffic_type = kwargs.get("traffic_type", "normal") traffic_time = kwargs.get("traffic_time", 0) @@ -245,8 +245,8 @@ class PmdBonding(object): self.tgen_input = [] tgen_input = self.tgen_input # generate packet contain multi stream - for pkt in list(self.packet_types.values()): - send_pkts.append(pkt.pktgen.pkt) + for scapy_pkt_builder in list(self.packet_types.values()): + send_pkts.append(scapy_pkt_builder.scapy_pkt_util.pkt) ixia_pkt = os.sep.join([self.target_source, "bonding_ixia.pcap"]) wrpcap(ixia_pkt, send_pkts) # ---------------------------------------------------------------- @@ -265,28 +265,28 @@ class PmdBonding(object): # calculate packets dropped in sleep time self.n_pkts = int((sleep / (1 / expect_pps)) * (1 / pause_rate)) # ---------------------------------------------------------------- - tester_port = self.parent.tester.get_local_port(self.parent.dut_ports[0]) - tgen_input.append((tester_port, tester_port, ixia_pkt)) + tg_port = self.parent.tg_node.get_local_port(self.parent.sut_ports[0]) + tgen_input.append((tg_port, tg_port, ixia_pkt)) # run latency stat statistics - self.parent.tester.loop_traffic_generator_throughput( + self.parent.tg_node.loop_traffic_generator_throughput( tgen_input, self.rate_percent ) def stop_ixia(self, data_types="packets"): - tester_inst = self.parent.tester + tg_inst = self.parent.tg_node # get ixia statistics - line_rate = tester_inst.get_port_line_rate() - rx_bps, rx_pps = tester_inst.stop_traffic_generator_throughput_loop( + line_rate = tg_inst.get_port_line_rate() + rx_bps, rx_pps = 
tg_inst.stop_traffic_generator_throughput_loop( self.tgen_input ) - output = tester_inst.traffic_get_port_stats(self.tgen_input) + output = tg_inst.traffic_get_port_stats(self.tgen_input) self.cur_data["ixia statistics"] = [] append = self.cur_data["ixia statistics"].append append("send packets: {0}".format(output[0])) append("line_rate: {0}".format(line_rate[0])) append("rate_percent: {0}%".format(self.rate_percent)) - def get_pktgen(self, name): + def get_perf_tg(self, name): pkt_gens = { "ixia": self.send_packets_by_ixia, "scapy": self.send_packets_by_scapy, @@ -302,19 +302,19 @@ class PmdBonding(object): time.sleep(2) # start traffic self.logger.info("begin transmission ...") - pktgen = self.get_pktgen(self.pktgen_name) - result = pktgen(**traffic_config) + perf_tg = self.get_perf_tg(self.pktgen_name) + result = perf_tg(**traffic_config) # end traffic self.logger.info("complete transmission") return result # - # On dut, dpdk testpmd common methods + # On SUT, dpdk testpmd common methods # def check_process_status(self, process_name="testpmd"): cmd = "ps aux | grep -i %s | grep -v grep | awk {'print $2'}" % (process_name) - out = self.parent.dut.alt_session.send_expect(cmd, "# ", 10) + out = self.parent.sut.alt_session.send_expect(cmd, "# ", 10) status = True if out != "" else False return status @@ -322,7 +322,7 @@ class PmdBonding(object): status = self.check_process_status(process_name) if not status: msg = "{0} process exceptional quit".format(process_name) - out = self.parent.dut.session.session.get_output_all() + out = self.parent.sut.session.session.get_output_all() self.logger.info(out) raise VerifyFailure(msg) @@ -361,7 +361,7 @@ class PmdBonding(object): except Exception as e: self.testpmd_status = "close" msg = "execute '{0}' timeout".format(item[0]) - output = out = self.parent.dut.session.session.get_output_all() + output = out = self.parent.sut.session.session.get_output_all() self.logger.error(output) raise Exception(msg) @@ -495,26 +495,26 @@ 
class PmdBonding(object): return stats - def set_tester_port_status(self, port_name, status): + def set_tg_port_status(self, port_name, status): """ Do some operations to the network interface port, such as "up" or "down". """ - eth = self.parent.tester.get_interface(port_name) - self.parent.tester.admin_ports_linux(eth, status) + eth = self.parent.tg_node.get_interface(port_name) + self.parent.tg_node.admin_ports_linux(eth, status) time.sleep(5) - def set_dut_peer_port_by_id(self, port_id, status): - # stop peer port on tester - intf = self.parent.tester.get_local_port(self.parent.dut_ports[port_id]) - self.set_tester_port_status(intf, status) + def set_sut_peer_port_by_id(self, port_id, status): + # stop peer port on TG + intf = self.parent.tg_node.get_local_port(self.parent.sut_ports[port_id]) + self.set_tg_port_status(intf, status) time.sleep(5) cur_status = self.get_port_info(port_id, "link_status") self.logger.info("port {0} is [{1}]".format(port_id, cur_status)) if cur_status != status: self.logger.warning("expected status is [{0}]".format(status)) - def set_dut_port_status(self, port_id, status): + def set_sut_port_status(self, port_id, status): opt = "link-up" if status == "up" else "link-down" # stop slave link by force cmd = "set {0} port {1}".format(opt, port_id) @@ -614,7 +614,7 @@ class PmdBonding(object): return None # - # On dut, dpdk testpmd common bonding methods + # On SUT, dpdk testpmd common bonding methods # def get_bonding_config(self, config_content, args): """ diff --git a/tests/compress_common.py b/tests/compress_common.py index aa591270..e71ea8af 100644 --- a/tests/compress_common.py +++ b/tests/compress_common.py @@ -29,7 +29,7 @@ default_eals = {"l": "0-3", "c": None, "n": None, "w": None, "vdev": None} def get_qat_device_list(test_case): device_id = conf.suite_cfg["qat_device_id"] - out = test_case.dut.send_expect( + out = test_case.sut_node.send_expect( "lspci -d:{}|awk '{{print $1}}'".format(device_id), "# ", 10 ) device_list = 
out.replace("\r", "\n").replace("\n\n", "\n").split("\n") @@ -39,7 +39,7 @@ def get_qat_device_list(test_case): def bind_qat_device(test_case, driver="igb_uio"): if driver == "vfio-pci": - test_case.dut.send_expect("modprobe vfio-pci", "#", 10) + test_case.sut_node.send_expect("modprobe vfio-pci", "#", 10) else: driver = "igb_uio" @@ -47,7 +47,7 @@ def bind_qat_device(test_case, driver="igb_uio"): device_list = get_qat_device_list(test_case) device_id = conf.suite_cfg["qat_device_id"] - test_case.dut.send_expect( + test_case.sut_node.send_expect( 'echo "8086 {}" > /sys/bus/pci/drivers/{}/new_id'.format(device_id, driver), "# ", 10, @@ -56,9 +56,9 @@ def bind_qat_device(test_case, driver="igb_uio"): cmd = "echo 0000:{} > /sys/bus/pci/devices/0000\:{}/driver/unbind".format( line, line.replace(":", "\:") ) - test_case.dut.send_expect(cmd, "# ", 10) + test_case.sut_node.send_expect(cmd, "# ", 10) cmd = "echo 0000:{} > /sys/bus/pci/drivers/{}/bind".format(line, driver) - test_case.dut.send_expect(cmd, "# ", 10) + test_case.sut_node.send_expect(cmd, "# ", 10) def get_opt_str(test_case, default_opts={}, override_opts={}): @@ -83,7 +83,7 @@ def get_opt_str(test_case, default_opts={}, override_opts={}): def get_input_file(test_case): case_cfg = conf.load_case_config(test_case._suite_result.test_case) input_file = conf.suite_cfg["input-file"] - out = test_case.dut.send_expect("ls %s" % input_file, "# ", 10) + out = test_case.sut_node.send_expect("ls %s" % input_file, "# ", 10) if out == input_file: file_list = [input_file] else: @@ -93,9 +93,9 @@ def get_input_file(test_case): def run_unit(test_case, eal={}): - cores = test_case.dut.get_core_list("1S/3C/1T") + cores = test_case.sut_node.get_core_list("1S/3C/1T") core_mask = utils.create_mask(cores) - mem_channels = test_case.dut.get_memory_channels() + mem_channels = test_case.sut_node.get_memory_channels() default = default_eals.copy() default["l"] = None @@ -104,11 +104,11 @@ def run_unit(test_case, eal={}): eal_str = 
get_opt_str(test_case, default, eal) cmdline = "./{app_path} {eal}".format( - app_path=test_case.dut.apps_name["test"], eal=eal_str + app_path=test_case.sut_node.apps_name["test"], eal=eal_str ) - test_case.dut.send_expect(cmdline, ">", 30) - out = test_case.dut.send_expect("compressdev_autotest", ">", 30) - test_case.dut.send_expect("quit", "# ", 30) + test_case.sut_node.send_expect(cmdline, ">", 30) + out = test_case.sut_node.send_expect("compressdev_autotest", ">", 30) + test_case.sut_node.send_expect("quit", "# ", 30) print(out) test_case.verify("Test OK" in out, "Test Failed") @@ -126,13 +126,13 @@ def run_compress_func(test_case, eal={}, opt={}): -- --input-file {file} {opt}" cmdline = cmdline.format( - app_path=test_case.dut.apps_name["test-compress-perf"], + app_path=test_case.sut_node.apps_name["test-compress-perf"], eal=eal_str, file=each_file, opt=opt_str, ) - out = test_case.dut.send_expect(cmdline, "# ", 300) + out = test_case.sut_node.send_expect(cmdline, "# ", 300) test_case.verify( "failed" not in out and "FATAL" not in out, "Test Failed: Parameter or the value error", @@ -166,14 +166,14 @@ def run_compress_perf(test_case, eal={}, opt={}): -- --input-file {file} --seg-sz {seg} {opt}" cmdline = cmdline.format( - app_path=test_case.dut.apps_name["test-compress-perf"], + app_path=test_case.sut_node.apps_name["test-compress-perf"], eal=eal_str, file=perf_file, seg=each_seg * 1024, opt=opt_str, ) - out = test_case.dut.send_expect(cmdline, "# ", 300) + out = test_case.sut_node.send_expect(cmdline, "# ", 300) test_case.verify( "failed" not in out and "FATAL" not in out, "Test Failed: Parameter or the value error", diff --git a/tests/cryptodev_common.py b/tests/cryptodev_common.py index b550b468..9d02a5c6 100644 --- a/tests/cryptodev_common.py +++ b/tests/cryptodev_common.py @@ -10,19 +10,19 @@ conf = SuiteConf("cryptodev_sample") def bind_qat_device(test_case, driver="igb_uio"): if driver == "vfio-pci": - test_case.dut.send_expect("modprobe vfio", "#", 10) 
- test_case.dut.send_expect("modprobe vfio-pci", "#", 10) + test_case.sut_node.send_expect("modprobe vfio", "#", 10) + test_case.sut_node.send_expect("modprobe vfio-pci", "#", 10) if "crypto_dev_id" in conf.suite_cfg: dev_id = conf.suite_cfg["crypto_dev_id"] test_case.logger.info( "specified the qat hardware device id in cfg: {}".format(dev_id) ) - out = test_case.dut.send_expect( + out = test_case.sut_node.send_expect( "lspci -D -d:{}|awk '{{print $1}}'".format(dev_id), "# ", 10 ) else: - out = test_case.dut.send_expect( + out = test_case.sut_node.send_expect( "lspci -D | grep QuickAssist |awk '{{print $1}}'", "# ", 10 ) @@ -36,7 +36,7 @@ def bind_qat_device(test_case, driver="igb_uio"): domain_id = addr_array[0] bus_id = addr_array[1] devfun_id = addr_array[2] - pf_port = GetNicObj(test_case.dut, domain_id, bus_id, devfun_id) + pf_port = GetNicObj(test_case.sut_node, domain_id, bus_id, devfun_id) sriov_vfs_pci = pf_port.get_sriov_vfs_pci() if not sriov_vfs_pci: @@ -44,7 +44,7 @@ def bind_qat_device(test_case, driver="igb_uio"): dev[line.strip()] = sriov_vfs_pci - test_case.dut.bind_eventdev_port(driver, " ".join(sriov_vfs_pci)) + test_case.sut_node.bind_eventdev_port(driver, " ".join(sriov_vfs_pci)) if not dev: raise Exception("can not find qat device") @@ -84,13 +84,13 @@ default_eal_opts = { def get_eal_opt_str(test_case, override_eal_opts={}, add_port=False): - cores = ",".join(test_case.dut.get_core_list("1S/3C/1T")) + cores = ",".join(test_case.sut_node.get_core_list("1S/3C/1T")) if "l" in conf.suite_cfg: cores = conf.suite_cfg["l"] default_eal_opts.update({"l": cores}) if "socket-mem" in conf.suite_cfg: default_eal_opts.update({"socket-mem": (conf.suite_cfg["socket-mem"])}) - mem_channel = test_case.dut.get_memory_channels() + mem_channel = test_case.sut_node.get_memory_channels() default_eal_opts.update({"n": mem_channel}) return get_opt_str(test_case, default_eal_opts, override_eal_opts, add_port) @@ -107,7 +107,7 @@ def get_opt_str(test_case, 
default_opts, override_opts={}, add_port=False): # Update options with func input opts.update(override_opts) - pci_list = [port["pci"] for port in test_case.dut.ports_info] + pci_list = [port["pci"] for port in test_case.sut_node.ports_info] if "a" in list(opts.keys()) and opts["a"]: pci_list.append(opts["a"]) if add_port and pci_list: diff --git a/tests/flexible_common.py b/tests/flexible_common.py index 63b4f8d3..e05a6239 100644 --- a/tests/flexible_common.py +++ b/tests/flexible_common.py @@ -5,19 +5,19 @@ import re import time -from framework.packet import Packet from framework.pmd_output import PmdOutput +from framework.scapy_packet_builder import ScapyPacketBuilder class FlexibleRxdBase(object): - def init_base(self, pci, dst_mac, test_type, dut_index=0): - tester_port_id = self.tester.get_local_port(self.dut_ports[dut_index]) - self.__tester_intf = self.tester.get_interface(tester_port_id) - self.__src_mac = self.tester.get_mac(tester_port_id) + def init_base(self, pci, dst_mac, test_type, sut_index=0): + tg_port_id = self.tg_node.get_local_port(self.sut_ports[sut_index]) + self.__tg_intf = self.tg_node.get_interface(tg_port_id) + self.__src_mac = self.tg_node.get_mac(tg_port_id) self.__dst_mac = dst_mac - self.__app_path = self.dut.apps_name["test-pmd"] - self.__pmdout = PmdOutput(self.dut) + self.__app_path = self.sut_node.apps_name["test-pmd"] + self.__pmdout = PmdOutput(self.sut_node) self.__test_type = test_type self.__pci = pci self.__pkg_count = 1 @@ -94,7 +94,7 @@ class FlexibleRxdBase(object): "clear port stats all", "start", ] - [self.dut.send_expect(cmd, "testpmd> ", 15) for cmd in cmds] + [self.sut_node.send_expect(cmd, "testpmd> ", 15) for cmd in cmds] def close_testpmd(self): if not self.__is_pmd_on: @@ -106,12 +106,12 @@ class FlexibleRxdBase(object): pass def __send_pkts_and_get_output(self, pkt_str): - pkt = Packet(pkt_str) - pkt.send_pkt( - self.tester, tx_port=self.__tester_intf, count=self.__pkg_count, timeout=30 + scapy_pkt_builder = 
ScapyPacketBuilder(pkt_str) + scapy_pkt_builder.send_pkt( + self.tg_node, tx_port=self.__tg_intf, count=self.__pkg_count, timeout=30 ) time.sleep(0.5) - output = self.dut.get_session_output(timeout=3) + output = self.sut_node.get_session_output(timeout=3) return output def __verify_common(self, pkts_list, msg=None): @@ -134,25 +134,25 @@ class FlexibleRxdBase(object): def replace_pkg(self, pkg="comms"): ice_pkg_path = "".join([self.ddp_dir, "ice.pkg"]) if pkg == "os_default": - self.dut.send_expect( + self.sut_node.send_expect( "cp {} {}".format(self.os_default_pkg, ice_pkg_path), "# " ) if pkg == "comms": - self.dut.send_expect("cp {} {}".format(self.comms_pkg, ice_pkg_path), "# ") - self.dut.send_expect( + self.sut_node.send_expect("cp {} {}".format(self.comms_pkg, ice_pkg_path), "# ") + self.sut_node.send_expect( "echo {0} > /sys/bus/pci/devices/{0}/driver/unbind".format(self.pci), "# ", 60, ) - self.dut.send_expect( + self.sut_node.send_expect( "echo {} > /sys/bus/pci/drivers/ice/bind".format(self.pci), "# ", 60 ) - self.dut.send_expect( + self.sut_node.send_expect( "./usertools/dpdk-devbind.py --force --bind=vfio-pci {}".format(self.pci), "# ", 60, ) - dmesg_out = self.dut.send_expect("dmesg | grep Package | tail -1", "#") + dmesg_out = self.sut_node.send_expect("dmesg | grep Package | tail -1", "#") package_version = re.search("version (.*)", dmesg_out).group(1) self.logger.info("package version:{}".format(package_version)) self.verify( @@ -273,13 +273,13 @@ class FlexibleRxdBase(object): else ["RXDID[18]", "RXDID[19]", "RXDID[21]", "RXDID[22]"], 16, ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth {}/ ipv4 src is 192.168.0.1 dst is 192.168.0.2 tos is 23 ttl is 98 / end actions queue index 2 / end".format( "" if self.__is_iavf else "dst is {} ".format(self.__dst_mac) ), "created", ) - self.dut.send_expect( + self.sut_node.send_expect( "flow create 0 ingress pattern eth / ipv6 src is 2001::3 dst is 2001::4 tc is 12 / 
end actions queue index 3 / end", "created", ) @@ -310,8 +310,8 @@ class FlexibleRxdBase(object): self.__verify_common([[pkts_str, fields_list2]], msg2) # send TCP - self.dut.send_expect("flow flush 0", "testpmd>") - self.dut.send_expect( + self.sut_node.send_expect("flow flush 0", "testpmd>") + self.sut_node.send_expect( "flow create 0 ingress pattern eth {0}/ ipv4 src is 192.168.0.1 dst is 192.168.0.2 / tcp src is 25 dst is 23 / end actions queue index {1} / end".format( "" if self.__is_iavf else "dst is {} ".format(self.__dst_mac), 4, @@ -339,7 +339,7 @@ class FlexibleRxdBase(object): "{port_opt}" ).format( **{ - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "pci": self.__pci, "param_type": param_type, "port_opt": port_opt, @@ -368,7 +368,7 @@ class FlexibleRxdBase(object): "{port_opt}" ).format( **{ - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "pci": self.__pci, "param_type": param_type, "port_opt": port_opt, diff --git a/tests/perf_test_base.py b/tests/perf_test_base.py index 8175a53f..345863a2 100644 --- a/tests/perf_test_base.py +++ b/tests/perf_test_base.py @@ -20,13 +20,8 @@ import numpy as np import framework.utils as utils from framework.config import SuiteConf from framework.exception import VerifyFailure -from framework.packet import Packet -from framework.pktgen import ( - PKTGEN_IXIA, - PKTGEN_IXIA_NETWORK, - PKTGEN_TREX, - TRANSMIT_CONT, -) +from framework.scapy_packet_builder import ScapyPacketBuilder +from framework.settings import TG_IXEXPLORER, TG_IXNETWORK, TG_TREX, TRANSMIT_CONT from framework.utils import convert_int2ip, convert_ip2int VF_L3FWD_NIC_SUPPORT = frozenset( @@ -157,16 +152,16 @@ class PerfTestBase(object): @property def __target_dir(self): target_dir = ( - "/root" + self.dut.base_dir[1:] - if self.dut.base_dir.startswith("~") - else self.dut.base_dir + "/root" + self.sut_node.base_dir[1:] + if 
self.sut_node.base_dir.startswith("~") + else self.sut_node.base_dir ) return target_dir @property def __sockets(self): sockets = [ - cpu.get("socket") for cpu in self.dut.get_all_cores() if cpu.get("socket") + cpu.get("socket") for cpu in self.sut_node.get_all_cores() if cpu.get("socket") ] total = len(set(sockets)) self.verify(total > 0, "cpu socket should not be zero") @@ -174,7 +169,7 @@ @property def __core_thread_num(self): - cpu_topos = self.dut.get_all_cores() + cpu_topos = self.sut_node.get_all_cores() core_index = cpu_topos[-1]["core"] thread_index = int(cpu_topos[-1]["thread"]) if not core_index: @@ -194,7 +189,7 @@ def d_con(self, cmd): _cmd = [cmd, "# ", 10] if isinstance(cmd, (str)) else cmd - return self.dut.send_expect(*_cmd) + return self.sut_node.send_expect(*_cmd) def __get_ipv4_lpm_vm_config(self, lpm_config): netaddr, mask = lpm_config.split("/") @@ -243,14 +238,14 @@ return layers, fields_config def __get_pkt_layers(self, pkt_type): - if pkt_type in list(Packet.def_packet.keys()): - return deepcopy(Packet.def_packet.get(pkt_type).get("layers")) + if pkt_type in list(ScapyPacketBuilder.def_packet.keys()): + return deepcopy(ScapyPacketBuilder.def_packet.get(pkt_type).get("layers")) local_def_packet = { "IPv6_RAW": ["ether", "ipv6", "raw"], } layers = local_def_packet.get(pkt_type) if not layers: - msg = f"{pkt_type} not set in framework/packet.py, nor in local" + msg = f"{pkt_type} not set in framework/scapy_packet_builder.py, nor in local" raise VerifyFailure(msg) return layers @@ -263,7 +258,7 @@ return pktlen def __get_frame_size(self, name, frame_size): - if self.pktgen_type is PKTGEN_IXIA_NETWORK: + if self.tg_type is TG_IXNETWORK: # ixNetwork api server will set ipv6 packet size to 78 bytes when # set frame size < 78. 
_frame_size = 78 if name is IP_TYPE.V6 and frame_size == 64 else frame_size @@ -313,19 +308,19 @@ class PerfTestBase(object): pkt_type = pkt_config.get("type") pkt_layers = pkt_config.get("pkt_layers") _layers = self.__get_pkt_layers(pkt_type) - if pkt_type not in Packet.def_packet.keys(): - pkt = Packet() - pkt.pkt_cfgload = True - pkt.assign_layers(_layers) + if pkt_type not in ScapyPacketBuilder.def_packet.keys(): + scapy_pkt_builder = ScapyPacketBuilder() + scapy_pkt_builder.pkt_cfgload = True + scapy_pkt_builder.assign_layers(_layers) else: - pkt = Packet(pkt_type=pkt_type) + scapy_pkt_builder = ScapyPacketBuilder(pkt_type=pkt_type) for layer in list(pkt_layers.keys()): if layer not in _layers: continue - pkt.config_layer(layer, pkt_layers[layer]) - self.logger.debug(pformat(pkt.pktgen.pkt.command())) + scapy_pkt_builder.config_layer(layer, pkt_layers[layer]) + self.logger.debug(pformat(scapy_pkt_builder.scapy_pkt_util.pkt.command())) - return pkt.pktgen.pkt + return scapy_pkt_builder.scapy_pkt_util.pkt def __get_mac_layer(self, port_id): if self.__mode is SUITE_TYPE.VF: @@ -354,7 +349,7 @@ class PerfTestBase(object): }, } else: - dmac = self.dut.get_mac_address(port_id) + dmac = self.sut_node.get_mac_address(port_id) layer = { "ether": { "dst": dmac, @@ -419,7 +414,7 @@ class PerfTestBase(object): def __add_stream_to_pktgen(self, streams, option): def port(index): - p = self.tester.get_local_port(self.__valports[index]) + p = self.tg_node.get_local_port(self.__valports[index]) return p topos = ( @@ -442,8 +437,8 @@ class PerfTestBase(object): _option["pcap"] = pkt if fields_config: _option["fields_config"] = fields_config - stream_id = self.tester.pktgen.add_stream(txport, rxport, pkt) - self.tester.pktgen.config_stream(stream_id, _option) + stream_id = self.tg_node.perf_tg.add_stream(txport, rxport, pkt) + self.tg_node.perf_tg.config_stream(stream_id, _option) stream_ids.append(stream_id) return stream_ids @@ -454,8 +449,8 @@ class PerfTestBase(object): 
traffic_opt = option.get("traffic_opt") self.logger.debug(option) # clear streams before add new streams - self.tester.pktgen.clear_streams() - # set stream into pktgen + self.tg_node.perf_tg.clear_streams() + # set stream into traffic generator stream_option = { "stream_config": { "txmode": {}, @@ -464,8 +459,8 @@ class PerfTestBase(object): } } stream_ids = self.__add_stream_to_pktgen(streams, stream_option) - # run packet generator - result = self.tester.pktgen.measure(stream_ids, traffic_opt) + # run traffic generator + result = self.tg_node.perf_tg.measure(stream_ids, traffic_opt) self.logger.debug( f"wait {self.__traffic_stop_wait_time} second after traffic stop" ) @@ -591,21 +586,21 @@ class PerfTestBase(object): if self.__vf_driver.value != self.drivername: drvs.append(self.__vf_driver.value) for driver in drvs: - self.dut.setup_modules(self.target, driver, "") + self.sut_node.setup_modules(self.target, driver, "") def __vf_create(self): for index, port_id in enumerate(self.__valports): - port_obj = self.dut.ports_info[port_id]["port"] + port_obj = self.sut_node.ports_info[port_id]["port"] pf_driver = ( port_obj.default_driver if self.__pf_driver is NIC_DRV.PCI_STUB else self.__pf_driver.value ) - self.dut.generate_sriov_vfs_by_port(port_id, 1, driver=pf_driver) + self.sut_node.generate_sriov_vfs_by_port(port_id, 1, driver=pf_driver) pf_pci = port_obj.pci - sriov_vfs_port = self.dut.ports_info[port_id].get("vfs_port") + sriov_vfs_port = self.sut_node.ports_info[port_id].get("vfs_port") if not sriov_vfs_port: - msg = f"failed to create vf on dut port {pf_pci}" + msg = f"failed to create vf on SUT port {pf_pci}" self.logger.error(msg) continue for port in sriov_vfs_port: @@ -632,8 +627,8 @@ class PerfTestBase(object): if not self.__vf_ports_info: return for port_id, _ in self.__vf_ports_info.items(): - self.dut.destroy_sriov_vfs_by_port(port_id) - port_obj = self.dut.ports_info[port_id]["port"] + self.sut_node.destroy_sriov_vfs_by_port(port_id) + port_obj = 
self.sut_node.ports_info[port_id]["port"] port_obj.bind_driver(self.drivername) self.__vf_ports_info = None @@ -642,7 +637,7 @@ class PerfTestBase(object): # RX_DESC rx_desc_comlication_flag = self.__get_rx_desc_complication_flag() if rx_desc_comlication_flag: - self.dut.build_install_dpdk( + self.sut_node.build_install_dpdk( self.target, extra_options=rx_desc_comlication_flag ) @@ -650,7 +645,7 @@ class PerfTestBase(object): # restore build rx_desc_comlication_flag = self.__get_rx_desc_complication_flag() if rx_desc_comlication_flag: - self.dut.build_install_dpdk(self.target) + self.sut_node.build_install_dpdk(self.target) def __get_rx_desc_complication_flag(self): rx_desc_flag = "" @@ -675,9 +670,9 @@ class PerfTestBase(object): apply under vf testing scenario """ self.__pmd_session_name = "testpmd" - self.__pmd_session = self.dut.create_session(self.__pmd_session_name) + self.__pmd_session = self.sut_node.create_session(self.__pmd_session_name) self.__host_testpmd = os.path.join( - self.__target_dir, self.dut.apps_name["test-pmd"] + self.__target_dir, self.sut_node.apps_name["test-pmd"] ) def __start_host_testpmd(self): @@ -686,7 +681,7 @@ class PerfTestBase(object): require enough PF ports,using kernel or dpdk driver, create 1 VF from each PF. 
""" - corelist = self.dut.get_core_list( + corelist = self.sut_node.get_core_list( "1S/{}C/1T".format(self.__core_offset), socket=self.__socket ) core_mask = utils.create_mask(corelist[2:]) @@ -705,7 +700,7 @@ class PerfTestBase(object): **{ "bin": self.__host_testpmd, "core_mask": core_mask, - "mem_channel": self.dut.get_memory_channels(), + "mem_channel": self.sut_node.get_memory_channels(), "memsize": mem_size, "allowlist": self.__get_host_testpmd_allowlist(), "prefix": "pf", @@ -742,7 +737,7 @@ class PerfTestBase(object): def __testpmd_start(self, mode, eal_para, config, frame_size): # use test pmd - bin = os.path.join(self.__target_dir, self.dut.apps_name["test-pmd"]) + bin = os.path.join(self.__target_dir, self.sut_node.apps_name["test-pmd"]) fwd_mode, _config = config port_topo = "--port-topology={}".format(self.__get_topo_option()) command_line = ( @@ -776,7 +771,7 @@ class PerfTestBase(object): "set fwd {}".format(fwd_mode), "start", ] - [self.dut.send_expect(cmd, "testpmd>") for cmd in _cmds] + [self.sut_node.send_expect(cmd, "testpmd>") for cmd in _cmds] def __testpmd_close(self): if not self.__is_bin_ps_on: @@ -787,16 +782,16 @@ class PerfTestBase(object): "show port stats all", "stop", ] - [self.dut.send_expect(cmd, "testpmd>") for cmd in _cmds] - self.dut.send_expect("quit", "# ") + [self.sut_node.send_expect(cmd, "testpmd>") for cmd in _cmds] + self.sut_node.send_expect("quit", "# ") self.__is_bin_ps_on = False def __l3fwd_init(self): """ compile l3fwd """ - self.app_name = self.dut.apps_name["l3fwd"].replace(" ", "") - out = self.dut.build_dpdk_apps("./examples/l3fwd") + self.app_name = self.sut_node.apps_name["l3fwd"].replace(" ", "") + out = self.sut_node.build_dpdk_apps("./examples/l3fwd") self.verify("Error" not in out, "compilation error 1") self.verify("No such file" not in out, "compilation error 2") self.__l3fwd_bin = os.path.join("./" + self.app_name) @@ -836,7 +831,7 @@ class PerfTestBase(object): self.d_con([command_line, "L3FWD:", 120]) 
self.__is_bin_ps_on = True # wait several second for l3fwd checking ports link status. - # It is aimed to make sure packet generator detect link up status. + # It is aimed to make sure traffic generator detect link up status. wait_time = ( self.__bin_ps_wait_up if self.__bin_ps_wait_up else 2 * len(self.__valports) ) @@ -852,7 +847,7 @@ class PerfTestBase(object): self.__is_bin_ps_on = False def __bin_ps_start(self, mode, core_list, config, frame_size): - eal_para = self.dut.create_eal_parameters( + eal_para = self.sut_node.create_eal_parameters( cores=core_list, ports=self.__bin_ps_allow_list, socket=self.__socket, @@ -1282,7 +1277,7 @@ class PerfTestBase(object): return cores_config, _thread_num, queues_per_port def __get_core_list(self, thread_num, cores, socket): - corelist = self.dut.get_core_list( + corelist = self.sut_node.get_core_list( cores, socket if cores.startswith("1S") else -1 ) if self.__bin_type is BIN_TYPE.PMD: @@ -1486,7 +1481,7 @@ class PerfTestBase(object): if not port_list: return None for port_index in port_list: - pci = self.dut.ports_info[port_index].get("pci") + pci = self.sut_node.ports_info[port_index].get("pci") if not pci: continue allowlist.append(pci) @@ -1577,7 +1572,7 @@ class PerfTestBase(object): else: self.__close_host_testpmd() if self.__pmd_session: - self.dut.close_session(self.__pmd_session) + self.sut_node.close_session(self.__pmd_session) self.__pmd_session = None self.__vf_destroy() self.__restore_compilation() @@ -1589,29 +1584,28 @@ class PerfTestBase(object): self.__cur_case = None @property - def is_pktgen_on(self): - return hasattr(self.tester, "is_pktgen") and self.tester.is_pktgen + def is_tg_on(self): + return hasattr(self.tg_node, "uses_perf_tg") and self.tg_node.uses_perf_tg @property - def pktgen_type(self): - if self.is_pktgen_on: - return self.tester.pktgen.pktgen_type + def tg_type(self): + if self.is_tg_on: + return self.tg_node.perf_tg.tg_type else: return "scapy" def verify_ports_number(self, port_num): 
supported_num = { - PKTGEN_TREX: [2, 4], - PKTGEN_IXIA: [1, 2, 4], - PKTGEN_IXIA_NETWORK: [1, 2, 4], + TG_TREX: [2, 4], + TG_IXEXPLORER: [1, 2, 4], + TG_IXNETWORK: [1, 2, 4], } - if not self.is_pktgen_on: - msg = "not using pktgen" + if not self.is_tg_on: + msg = "not using traffic generator" self.logger.warning(msg) return # verify that enough ports are available - _supported_num = supported_num.get(self.pktgen_type) - msg = "Port number must be {} when using pktgen <{}>".format( - pformat(_supported_num), self.pktgen_type - ) + _supported_num = supported_num.get(self.tg_type) + msg = "Port number must be {} when using traffic generator <{}>"\ + .format(pformat(_supported_num), self.tg_type) self.verify(len(port_num) in _supported_num, msg) diff --git a/tests/rte_flow_common.py b/tests/rte_flow_common.py index fca055a0..566324b4 100644 --- a/tests/rte_flow_common.py +++ b/tests/rte_flow_common.py @@ -6,7 +6,7 @@ import json import re import time -from framework.packet import Packet +from framework.scapy_packet_builder import ScapyPacketBuilder from framework.utils import GREEN, RED TXQ_RXQ_NUMBER = 16 @@ -827,28 +827,28 @@ def send_ipfragment_pkt(test_case, pkts, tx_port): if isinstance(pkts, str): pkts = [pkts] for i in range(len(pkts)): - test_case.tester.scapy_session.send_expect( + test_case.tg_node.scapy_session.send_expect( 'p=eval("{}")'.format(pkts[i]), ">>> " ) if "IPv6ExtHdrFragment" in pkts[i]: - test_case.tester.scapy_session.send_expect("pkts=fragment6(p, 500)", ">>> ") + test_case.tg_node.scapy_session.send_expect("pkts=fragment6(p, 500)", ">>> ") else: - test_case.tester.scapy_session.send_expect( + test_case.tg_node.scapy_session.send_expect( "pkts=fragment(p, fragsize=500)", ">>> " ) - test_case.tester.scapy_session.send_expect( + test_case.tg_node.scapy_session.send_expect( 'sendp(pkts, iface="{}")'.format(tx_port), ">>> " ) class RssProcessing(object): - def __init__(self, test_case, pmd_output, tester_ifaces, rxq, ipfrag_flag=False): + def 
__init__(self, test_case, pmd_output, tg_ifaces, rxq, ipfrag_flag=False): self.test_case = test_case self.pmd_output = pmd_output - self.tester_ifaces = tester_ifaces + self.tg_ifaces = tg_ifaces self.rxq = rxq self.logger = test_case.logger - self.pkt = Packet() + self.scapy_pkt_builder = ScapyPacketBuilder() self.verify = self.test_case.verify self.pass_flag = "passed" self.fail_flag = "failed" @@ -1051,16 +1051,16 @@ class RssProcessing(object): return hashes, queues def send_pkt_get_output(self, pkts, port_id=0, count=1, interval=0): - tx_port = self.tester_ifaces[0] if port_id == 0 else self.tester_ifaces[1] + tx_port = self.tg_ifaces[0] if port_id == 0 else self.tg_ifaces[1] self.logger.info("----------send packet-------------") self.logger.info("{}".format(pkts)) if self.ipfrag_flag == True: count = 2 send_ipfragment_pkt(self.test_case, pkts, tx_port) else: - self.pkt.update_pkt(pkts) - self.pkt.send_pkt( - crb=self.test_case.tester, + self.scapy_pkt_builder.update_pkt(pkts) + self.scapy_pkt_builder.send_pkt( + node=self.test_case.tg_node, tx_port=tx_port, count=count, interval=interval, @@ -1210,14 +1210,14 @@ class RssProcessing(object): rule_id = 0 if isinstance(rule_id, list): for i in rule_id: - out = self.test_case.dut.send_command( + out = self.test_case.sut_node.send_command( "flow destroy %s rule %s" % (port_id, i), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") m = p.search(out) self.verify(m, "flow rule %s delete failed" % rule_id) else: - out = self.test_case.dut.send_command( + out = self.test_case.sut_node.send_command( "flow destroy %s rule %s" % (port_id, rule_id), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") @@ -1352,18 +1352,18 @@ class RssProcessing(object): class FdirProcessing(object): - def __init__(self, test_case, pmd_output, tester_ifaces, rxq, ipfrag_flag=False): + def __init__(self, test_case, pmd_output, tg_ifaces, rxq, ipfrag_flag=False): self.test_case = test_case self.pmd_output = pmd_output - 
self.tester_ifaces = tester_ifaces + self.tg_ifaces = tg_ifaces self.logger = test_case.logger - self.pkt = Packet() + self.scapy_pkt_builder = ScapyPacketBuilder() self.rxq = rxq self.verify = self.test_case.verify self.ipfrag_flag = ipfrag_flag def send_pkt_get_output(self, pkts, port_id=0, count=1, interval=0, drop=False): - tx_port = self.tester_ifaces[0] if port_id == 0 else self.tester_ifaces[1] + tx_port = self.tg_ifaces[0] if port_id == 0 else self.tg_ifaces[1] self.logger.info("----------send packet-------------") self.logger.info("{}".format(pkts)) if drop: @@ -1372,9 +1372,9 @@ class FdirProcessing(object): if self.ipfrag_flag == True: send_ipfragment_pkt(self.test_case, pkts, tx_port) else: - self.pkt.update_pkt(pkts) - self.pkt.send_pkt( - crb=self.test_case.tester, + self.scapy_pkt_builder.update_pkt(pkts) + self.scapy_pkt_builder.send_pkt( + node=self.test_case.tg_node, tx_port=tx_port, count=count, interval=interval, @@ -1387,9 +1387,9 @@ class FdirProcessing(object): count = 2 send_ipfragment_pkt(self.test_case, pkts, tx_port) else: - self.pkt.update_pkt(pkts) - self.pkt.send_pkt( - crb=self.test_case.tester, + self.scapy_pkt_builder.update_pkt(pkts) + self.scapy_pkt_builder.send_pkt( + node=self.test_case.tg_node, tx_port=tx_port, count=count, interval=interval, @@ -1453,14 +1453,14 @@ class FdirProcessing(object): rule_id = 0 if isinstance(rule_id, list): for i in rule_id: - out = self.test_case.dut.send_command( + out = self.test_case.sut_node.send_command( "flow destroy %s rule %s" % (port_id, i), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") m = p.search(out) self.verify(m, "flow rule %s delete failed" % rule_id) else: - out = self.test_case.dut.send_command( + out = self.test_case.sut_node.send_command( "flow destroy %s rule %s" % (port_id, rule_id), timeout=1 ) p = re.compile(r"Flow rule #(\d+) destroyed") @@ -1569,7 +1569,7 @@ class FdirProcessing(object): ) drop = tv["check_param"].get("drop") # create rule - 
self.test_case.dut.send_expect( + self.test_case.sut_node.send_expect( "flow flush %d" % port_id, "testpmd> ", 120 ) rule_rss = [] @@ -1638,7 +1638,7 @@ class FdirProcessing(object): self.logger.info((GREEN("case passed: %s" % tv["name"]))) except Exception as e: self.logger.warning((RED(e))) - self.test_case.dut.send_command("flow flush 0", timeout=1) + self.test_case.sut_node.send_command("flow flush 0", timeout=1) test_results[tv["name"]] = False self.logger.info((GREEN("case failed: %s" % tv["name"]))) continue @@ -1649,14 +1649,14 @@ class FdirProcessing(object): self.verify(all(test_results.values()), "{} failed".format(failed_cases)) def send_pkt_get_out(self, pkts, port_id=0, count=1, interval=0): - tx_port = self.tester_ifaces[0] if port_id == 0 else self.tester_ifaces[1] + tx_port = self.tg_ifaces[0] if port_id == 0 else self.tg_ifaces[1] self.logger.info("----------send packet-------------") self.logger.info("{}".format(pkts)) self.pmd_output.execute_cmd("start") self.pmd_output.execute_cmd("clear port stats all") - self.pkt.update_pkt(pkts) - self.pkt.send_pkt( - crb=self.test_case.tester, tx_port=tx_port, count=count, interval=interval + self.scapy_pkt_builder.update_pkt(pkts) + self.scapy_pkt_builder.send_pkt( + node=self.test_case.tg_node, tx_port=tx_port, count=count, interval=interval ) out1 = self.pmd_output.get_output(timeout=1) diff --git a/tests/smoke_base.py b/tests/smoke_base.py index 23dbe482..2ebee257 100644 --- a/tests/smoke_base.py +++ b/tests/smoke_base.py @@ -31,7 +31,7 @@ class SmokeTest(object): rss=False, driver=None, ): - self.test_case.dut.send_expect("clear port stats all", "testpmd> ") + self.test_case.sut_node.send_expect("clear port stats all", "testpmd> ") l3_len = pkt_size - HEADER_SIZE["eth"] payload = pkt_size - HEADER_SIZE["eth"] - HEADER_SIZE["ip"] hash_flag = False @@ -51,8 +51,8 @@ class SmokeTest(object): # generate PACKAGE_COUNT count package, the IP dst is random. 
for i in range(0, PACKAGE_COUNT): p = "Ether(dst='{}',src='{}')/IP(src='{}',dst=RandIP(),len={})/Raw(load='X'*{})".format( - self.test_case.smoke_dut_mac, - self.test_case.smoke_tester_mac, + self.test_case.smoke_sut_mac, + self.test_case.smoke_tg_mac, l3_src, l3_len, payload, @@ -61,8 +61,8 @@ class SmokeTest(object): else: pkt = [ "Ether(dst='{}',src='{}')/IP(src='{}',dst='{}',len={})/Raw(load='X'*{})".format( - self.test_case.smoke_dut_mac, - self.test_case.smoke_tester_mac, + self.test_case.smoke_sut_mac, + self.test_case.smoke_tg_mac, l3_src, l3_dst, l3_len, @@ -70,12 +70,12 @@ class SmokeTest(object): ) ] - self.test_case.pkt.update_pkt(pkt) + self.test_case.scapy_pkt_builder.update_pkt(pkt) # wait package update time.sleep(1) - self.test_case.pkt.send_pkt( - crb=self.test_case.tester, tx_port=self.test_case.smoke_tester_nic + self.test_case.scapy_pkt_builder.send_pkt( + node=self.test_case.tg_node, tx_port=self.test_case.smoke_tg_nic ) time.sleep(0.5) out = self.test_case.pmd_out.get_output(timeout=1) @@ -83,7 +83,7 @@ class SmokeTest(object): # collect all queues queues = queue_pattern.findall(out) # get dpdk statistical information - stats = self.test_case.pmd_out.get_pmd_stats(self.test_case.smoke_dut_ports[0]) + stats = self.test_case.pmd_out.get_pmd_stats(self.test_case.smoke_sut_ports[0]) if "RTE_MBUF_F_RX_RSS_HASH" in out: hash_flag = True @@ -158,11 +158,11 @@ class SmokeTest(object): self.test_case.logger.info("txq rxq the queues[{}] error".format(queues)) return False - self.test_case.dut.send_expect("stop", "testpmd> ") - self.test_case.dut.send_expect("port stop all", "testpmd> ") - self.test_case.dut.send_expect("port config all rxq 1", "testpmd> ") - self.test_case.dut.send_expect("port config all txq 1", "testpmd> ") - out = self.test_case.dut.send_expect("show config rxtx", "testpmd> ") + self.test_case.sut_node.send_expect("stop", "testpmd> ") + self.test_case.sut_node.send_expect("port stop all", "testpmd> ") + 
self.test_case.sut_node.send_expect("port config all rxq 1", "testpmd> ") + self.test_case.sut_node.send_expect("port config all txq 1", "testpmd> ") + out = self.test_case.sut_node.send_expect("show config rxtx", "testpmd> ") if "RX queue number: 1" not in out: self.test_case.logger.info("RX queue number 1 no display") return False @@ -170,9 +170,9 @@ class SmokeTest(object): self.test_case.logger.info("Tx queue number 1 no display") return False - self.test_case.dut.send_expect("port start all", "testpmd> ") - self.test_case.dut.send_expect("start", "testpmd> ") - self.test_case.pmd_out.wait_link_status_up(self.test_case.smoke_dut_ports[0]) + self.test_case.sut_node.send_expect("port start all", "testpmd> ") + self.test_case.sut_node.send_expect("start", "testpmd> ") + self.test_case.pmd_out.wait_link_status_up(self.test_case.smoke_sut_ports[0]) queue_after, stats = self.send_pkg_return_stats() if queue_after is None: -- 2.20.1