From: Jun Dong <junx.dong@intel.com>
To: dts@dpdk.org
Cc: lijuan.tu@intel.com, qingx.sun@intel.com, junx.dong@intel.com,
"Juraj Linkeš" <juraj.linkes@pantheon.tech>
Subject: [V3 2/5] rename base classes 2
Date: Fri, 10 Jun 2022 13:08:07 +0800
Message-ID: <20220610050810.1531-3-junx.dong@intel.com>
In-Reply-To: <20220610050810.1531-1-junx.dong@intel.com>
From: Juraj Linkeš <juraj.linkes@pantheon.tech>
The rename is applied to the following paths:
framework/*
main.py
nics/*
tools/*
Signed-off-by: Juraj Linkeš <juraj.linkes@pantheon.tech>
Signed-off-by: Jun Dong <junx.dong@intel.com>
---
V3:
- Fixed string keyword confusion by replacing 'tg' with 'Traffic'
- Fixed string keyword confusion by replacing 'sut' with 'Sut'
V2:
- The original rename modification
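
For reference, a minimal usage sketch (illustration only, not part of the
patch), following the imports this series introduces in framework/dts.py:

    # Hypothetical sketch; module paths follow the renames in this patch.
    from framework.sut_node import SutNode                 # was framework.dut.Dut
    from framework.tg_node import TrafficGeneratorNode     # was framework.tester.Tester

    # dts_nodes_init() (formerly dts_crbs_init) creates one SutNode per topology
    # entry plus a single TrafficGeneratorNode; each TestCase subclass is then
    # constructed as test_class(sut_nodes, tg_node, target, suite_name).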
framework/asan_test.py | 18 +-
framework/checkCase.py | 44 +--
framework/config.py | 192 ++++++------
framework/crbs.py | 37 ---
framework/debugger.py | 8 +-
framework/dts.py | 296 +++++++++---------
framework/excel_reporter.py | 74 ++---
framework/exception.py | 8 +-
framework/ixia_network/ixnet.py | 2 +-
framework/json_reporter.py | 40 +--
framework/logger.py | 188 +++++------
framework/multiple_vm.py | 78 ++---
framework/{crb.py => node.py} | 120 +++----
framework/pktgen.py | 207 ------------
framework/plotting.py | 6 +-
framework/pmd_output.py | 26 +-
framework/project_dpdk.py | 72 ++---
framework/qemu_kvm.py | 88 +++---
framework/qemu_libvirt.py | 30 +-
framework/rst.py | 8 +-
.../{packet.py => scapy_packet_builder.py} | 233 +++++++-------
framework/settings.py | 30 +-
framework/ssh_connection.py | 12 +-
framework/ssh_pexpect.py | 42 +--
framework/stats_reporter.py | 22 +-
framework/{dut.py => sut_node.py} | 247 ++++++++-------
framework/test_case.py | 88 +++---
framework/test_result.py | 210 ++++++-------
.../{pktgen_ixia.py => tg_ixexplorer.py} | 115 ++++---
...pktgen_ixia_network.py => tg_ixnetwork.py} | 40 ++-
framework/{tester.py => tg_node.py} | 275 ++++++++--------
framework/{pktgen_base.py => tg_perf.py} | 266 ++++++++++++----
framework/{pktgen_trex.py => tg_trex.py} | 54 ++--
framework/utils.py | 40 +--
framework/virt_base.py | 126 ++++----
framework/virt_common.py | 6 +-
framework/virt_resource.py | 36 +--
framework/virt_scene.py | 108 +++----
framework/{virt_dut.py => virt_sut.py} | 102 +++---
main.py | 12 +-
nics/net_device.py | 46 +--
nics/system_info.py | 6 +-
tools/dump_case.py | 6 +-
tools/setup.py | 134 ++++----
44 files changed, 1864 insertions(+), 1934 deletions(-)
delete mode 100644 framework/crbs.py
rename framework/{crb.py => node.py} (91%)
delete mode 100644 framework/pktgen.py
rename framework/{packet.py => scapy_packet_builder.py} (85%)
rename framework/{dut.py => sut_node.py} (89%)
rename framework/{pktgen_ixia.py => tg_ixexplorer.py} (95%)
rename framework/{pktgen_ixia_network.py => tg_ixnetwork.py} (83%)
rename framework/{tester.py => tg_node.py} (76%)
rename framework/{pktgen_base.py => tg_perf.py} (70%)
rename framework/{pktgen_trex.py => tg_trex.py} (95%)
rename framework/{virt_dut.py => virt_sut.py} (83%)
diff --git a/framework/asan_test.py b/framework/asan_test.py
index 8ca0536b..c367f610 100644
--- a/framework/asan_test.py
+++ b/framework/asan_test.py
@@ -57,9 +57,9 @@ class _FrameworkADAPTER(object):
def decorator_build_install_dpdk():
added_param = _ASanConfig().build_param
if added_param is not None:
- from framework.project_dpdk import DPDKdut
+ from framework.project_dpdk import DPDKSut
- origin_func = DPDKdut.build_install_dpdk
+ origin_func = DPDKSut.build_install_dpdk
def new_func(*args, **kwargw):
kwargw["extra_options"] = " ".join(
@@ -67,7 +67,7 @@ class _FrameworkADAPTER(object):
)
origin_func(*args, **kwargw)
- DPDKdut.build_install_dpdk = new_func
+ DPDKSut.build_install_dpdk = new_func
@staticmethod
def decorator_dts_run():
@@ -76,14 +76,14 @@ class _FrameworkADAPTER(object):
origin_func = dts.dts_run_suite
def new_func(*args, **kwargs):
- duts = args[0]
- for dut in duts:
- dut.send_expect(COMMAND_OF_CLOSE_ADDRESS_RANDOM, "#")
+ sut_nodes = args[0]
+ for sut in sut_nodes:
+ sut.send_expect(COMMAND_OF_CLOSE_ADDRESS_RANDOM, "#")
origin_func(*args, **kwargs)
- for dut in duts:
- dut.send_expect(COMMAND_OF_OPEN_ADDRESS_RANDOM, "#")
+ for sut in sut_nodes:
+ sut.send_expect(COMMAND_OF_OPEN_ADDRESS_RANDOM, "#")
dts.dts_run_suite = new_func
@@ -290,7 +290,7 @@ class _NewReport(object):
def process_report_header(self):
head_key_list = [
- "dut",
+ "sut",
"kdriver",
"firmware",
"package",
diff --git a/framework/checkCase.py b/framework/checkCase.py
index 7b2c22b3..823329ca 100644
--- a/framework/checkCase.py
+++ b/framework/checkCase.py
@@ -23,7 +23,7 @@ class CheckCase(object):
"""
def __init__(self):
- self.dut = None
+ self.sut_node = None
self.comments = ""
self.check_function_dict = {}
@@ -50,17 +50,17 @@ class CheckCase(object):
)
)
- def check_dut(self, dut):
+ def check_sut(self, sut):
"""
- Change DUT instance for environment check
+ Change SUT instance for environment check
"""
- self.dut = dut
+ self.sut_node = sut
def _check_os(self, os_type):
if "all" == os_type[0].lower():
return True
- dut_os_type = self.dut.get_os_type()
- if dut_os_type in os_type:
+ sut_os_type = self.sut_node.get_os_type()
+ if sut_os_type in os_type:
return True
else:
return False
@@ -68,8 +68,8 @@ class CheckCase(object):
def _check_nic(self, nic_type):
if "all" == nic_type[0].lower():
return True
- dut_nic_type = get_nic_name(self.dut.ports_info[0]["type"])
- if dut_nic_type in nic_type:
+ sut_nic_type = get_nic_name(self.sut_node.ports_info[0]["type"])
+ if sut_nic_type in nic_type:
return True
else:
return False
@@ -77,7 +77,7 @@ class CheckCase(object):
def _check_target(self, target):
if "all" == target[0].lower():
return True
- if self.dut.target in target:
+ if self.sut_node.target in target:
return True
else:
return False
@@ -93,14 +93,14 @@ class CheckCase(object):
def case_skip(self, case_name):
"""
- Check whether test case and DUT match skip criteria
+ Check whether test case and SUT match skip criteria
Return True if skip should skip
"""
skip_flag = False
self.comments = ""
- if self.dut is None:
- print(RED("No Dut assigned before case skip check"))
+ if self.sut_node is None:
+ print(RED("No SUT assigned before case skip check"))
return skip_flag
if case_name in list(self.check_function_dict.keys()):
@@ -137,14 +137,14 @@ class CheckCase(object):
def case_support(self, case_name):
"""
- Check whether test case and DUT match support criteria
+ Check whether test case and SUT match support criteria
Return False if test case not supported
"""
support_flag = True
self.comments = ""
- if self.dut is None:
- print(RED("No Dut assigned before case support check"))
+ if self.sut_node is None:
+ print(RED("No SUT assigned before case support check"))
return support_flag
if case_name in list(self.support_function_dict.keys()):
@@ -179,7 +179,7 @@ class CheckCase(object):
return support_flag
-class simple_dut(object):
+class simple_sut(object):
def __init__(self, os="", target="", nic=""):
self.ports_info = [{}]
self.os = os
@@ -191,23 +191,23 @@ class simple_dut(object):
if __name__ == "__main__":
- dut = simple_dut(os="linux", target="x86_64-native-linuxapp-gcc", nic="177d:a034")
- dut1 = simple_dut(
+ sut = simple_sut(os="linux", target="x86_64-native-linuxapp-gcc", nic="177d:a034")
+ sut1 = simple_sut(
os="freebsd", target="x86_64-native-linuxapp-gcc", nic="8086:158b"
)
# create instance for check/support case list
case_inst = CheckCase()
- # check dut
- case_inst.check_dut(dut)
+ # check SUT
+ case_inst.check_sut(sut)
print(case_inst.case_skip("fdir_flexword_drop_ipv4"))
print(case_inst.comments)
print(case_inst.case_support("Vxlan_tunnel"))
print(case_inst.comments)
- # check other dut
- case_inst.check_dut(dut1)
+ # check other SUT
+ case_inst.check_sut(sut1)
print(case_inst.case_skip("fdir_flexword_drop_ipv4"))
print(case_inst.comments)
print(case_inst.case_support("Vxlan_tunnel"))
diff --git a/framework/config.py b/framework/config.py
index 2cd95ff2..27dcc5bf 100644
--- a/framework/config.py
+++ b/framework/config.py
@@ -3,7 +3,7 @@
#
"""
-Generic port and crbs configuration file load function
+Generic port and topology nodes configuration file load function
"""
import argparse # parse arguments module
import configparser # config parse module
@@ -17,21 +17,19 @@ from .exception import (
)
from .settings import (
CONFIG_ROOT_PATH,
- DTS_CFG_FOLDER,
- PKTGEN,
- PKTGEN_DPDK,
- PKTGEN_IXIA,
- PKTGEN_IXIA_NETWORK,
- PKTGEN_TREX,
+ PERF_TG_CONF_KEY,
SUITE_SECTION_NAME,
- load_global_setting,
+ TG_DPDK,
+ TG_IXEXPLORER,
+ TG_IXNETWORK,
+ TG_TREX,
)
PORTCONF = "%s/ports.cfg" % CONFIG_ROOT_PATH
-CRBCONF = "%s/crbs.cfg" % CONFIG_ROOT_PATH
+TOPOCONF = "%s/topology.cfg" % CONFIG_ROOT_PATH
VIRTCONF = "%s/virt_global.cfg" % CONFIG_ROOT_PATH
IXIACONF = "%s/ixia.cfg" % CONFIG_ROOT_PATH
-PKTGENCONF = "%s/pktgen.cfg" % CONFIG_ROOT_PATH
+TGCONF = "%s/traffic_generator.cfg" % CONFIG_ROOT_PATH
SUITECONF_SAMPLE = "%s/suite_sample.cfg" % CONFIG_ROOT_PATH
GLOBALCONF = "%s/global_suite.cfg" % CONFIG_ROOT_PATH
APPNAMECONF = "%s/app_name.cfg" % CONFIG_ROOT_PATH
@@ -198,12 +196,12 @@ class PortConf(UserConf):
self.port_conf = None
raise PortConfigParseException
- def load_ports_config(self, crbIP):
+ def load_ports_config(self, nodeIP):
self.ports_cfg = {}
if self.port_conf is None:
return
- ports = self.port_conf.load_section(crbIP)
+ ports = self.port_conf.load_section(nodeIP)
if ports is None:
return
key, config = ports[0]
@@ -247,105 +245,105 @@ class PortConf(UserConf):
return False
-class CrbsConf(UserConf):
- DEF_CRB = {
+class TopologyConf(UserConf):
+ TOPO_DEFAULTS = {
"IP": "",
"board": "default",
"user": "",
"pass": "",
- "tester IP": "",
- "tester pass": "",
+ "tg IP": "",
+ "tg pass": "",
"memory channels": 4,
- PKTGEN: None,
+ PERF_TG_CONF_KEY: None,
"bypass core0": True,
- "dut_cores": "",
- "snapshot_load_side": "tester",
+ "sut_cores": "",
+ "snapshot_load_side": "tg",
}
- def __init__(self, crbs_conf=CRBCONF):
- self.config_file = crbs_conf
- self.crbs_cfg = []
+ def __init__(self, topo_conf=TOPOCONF):
+ self.config_file = topo_conf
+ self.topo_cfg = []
try:
- self.crbs_conf = UserConf(self.config_file)
+ self.topo_conf = UserConf(self.config_file)
except ConfigParseException:
- self.crbs_conf = None
+ self.topo_conf = None
raise ConfigParseException
- def load_crbs_config(self):
- sections = self.crbs_conf.get_sections()
+ def load_topo_config(self):
+ sections = self.topo_conf.get_sections()
if not sections:
- return self.crbs_cfg
+ return self.topo_cfg
- for name in sections:
- crb = self.DEF_CRB.copy()
- crb["section"] = name
- crb_confs = self.crbs_conf.load_section(name)
- if not crb_confs:
+ for node_name in sections:
+ node = self.TOPO_DEFAULTS.copy()
+ node["section"] = node_name
+ node_conf = self.topo_conf.load_section(node_name)
+ if not node_conf:
continue
- # convert file configuration to dts crbs
- for conf in crb_confs:
+ # convert file configuration to dts node configuration
+ for conf in node_conf:
key, value = conf
- if key == "dut_ip":
- crb["IP"] = value
- elif key == "dut_user":
- crb["user"] = value
- elif key == "dut_passwd":
- crb["pass"] = value
+ if key == "sut_ip":
+ node["IP"] = value
+ elif key == "sut_user":
+ node["user"] = value
+ elif key == "sut_passwd":
+ node["pass"] = value
elif key == "os":
- crb["OS"] = value
- elif key == "tester_ip":
- crb["tester IP"] = value
- elif key == "tester_passwd":
- crb["tester pass"] = value
- elif key == "pktgen_group":
- crb[PKTGEN] = value.lower()
+ node["OS"] = value
+ elif key == "tg_ip":
+ node["tg IP"] = value
+ elif key == "tg_passwd":
+ node["tg pass"] = value
+ elif key == "perf_tg":
+ node[PERF_TG_CONF_KEY] = value.lower()
elif key == "channels":
- crb["memory channels"] = int(value)
+ node["memory channels"] = int(value)
elif key == "bypass_core0":
if value == "True":
- crb["bypass core0"] = True
+ node["bypass core0"] = True
else:
- crb["bypass core0"] = False
+ node["bypass core0"] = False
elif key == "board":
- crb["board"] = value
- elif key == "dut_arch":
- crb["dut arch"] = value
- elif key == "dut_cores":
- crb["dut_cores"] = value
+ node["board"] = value
+ elif key == "sut_arch":
+ node["sut arch"] = value
+ elif key == "sut_cores":
+ node["sut_cores"] = value
elif key == "snapshot_load_side":
- crb["snapshot_load_side"] = value.lower()
+ node["snapshot_load_side"] = value.lower()
- self.crbs_cfg.append(crb)
- return self.crbs_cfg
+ self.topo_cfg.append(node)
+ return self.topo_cfg
-class PktgenConf(UserConf):
- def __init__(self, pktgen_type="ixia", pktgen_conf=PKTGENCONF):
- self.config_file = pktgen_conf
- self.pktgen_type = pktgen_type.lower()
- self.pktgen_cfg = {}
+class TrafficGeneratorConf(UserConf):
+ def __init__(self, tg_type=TG_IXEXPLORER, tg_conf=TGCONF):
+ self.config_file = tg_conf
+ self.tg_type = tg_type.lower()
+ self.tg_cfg = {}
try:
- self.pktgen_conf = UserConf(self.config_file)
+ self.tg_conf = UserConf(self.config_file)
except ConfigParseException:
- self.pktgen_conf = None
+ self.tg_conf = None
raise ConfigParseException
- def load_pktgen_ixia_config(self, section):
+ def load_tg_ixia_config(self, section):
port_reg = r"card=(\d+),port=(\d+)"
- pktgen_confs = self.pktgen_conf.load_section(section)
- if not pktgen_confs:
+ tg_conf = self.tg_conf.load_section(section)
+ if not tg_conf:
return
# convert file configuration to dts ixiacfg
ixia_group = {}
- for conf in pktgen_confs:
+ for conf in tg_conf:
key, value = conf
if key == "ixia_version":
ixia_group["Version"] = value
elif key == "ixia_ip":
ixia_group["IP"] = value
elif key == "ixia_ports":
- ports = self.pktgen_conf.load_config(value)
+ ports = self.tg_conf.load_config(value)
ixia_ports = []
for port in ports:
m = re.match(port_reg, port)
@@ -370,42 +368,42 @@ class PktgenConf(UserConf):
print("ixia configuration file request ixia_ports option!!!")
return
- self.pktgen_cfg[section.lower()] = ixia_group
+ self.tg_cfg[section.lower()] = ixia_group
- def load_pktgen_config(self):
- sections = self.pktgen_conf.get_sections()
+ def load_tg_config(self):
+ sections = self.tg_conf.get_sections()
if not sections:
- return self.pktgen_cfg
+ return self.tg_cfg
for section in sections:
- if self.pktgen_type == PKTGEN_DPDK and section.lower() == PKTGEN_DPDK:
- pktgen_confs = self.pktgen_conf.load_section(section)
- if not pktgen_confs:
+ if self.tg_type == TG_DPDK and section.lower() == TG_DPDK:
+ tg_conf = self.tg_conf.load_section(section)
+ if not tg_conf:
continue
- # covert file configuration to dts pktgen cfg
- for conf in pktgen_confs:
+ # covert file configuration to dts tg cfg
+ for conf in tg_conf:
key, value = conf
- self.pktgen_cfg[key] = value
- elif self.pktgen_type == PKTGEN_TREX and section.lower() == PKTGEN_TREX:
- pktgen_confs = self.pktgen_conf.load_section(section)
- if not pktgen_confs:
+ self.tg_cfg[key] = value
+ elif self.tg_type == TG_TREX and section.lower() == TG_TREX:
+ tg_conf = self.tg_conf.load_section(section)
+ if not tg_conf:
continue
- # covert file configuration to dts pktgen cfg
- for conf in pktgen_confs:
+ # covert file configuration to dts tg cfg
+ for conf in tg_conf:
key, value = conf
- self.pktgen_cfg[key] = value
+ self.tg_cfg[key] = value
elif (
- self.pktgen_type == PKTGEN_IXIA and section.lower() == PKTGEN_IXIA
+ self.tg_type == TG_IXEXPLORER and section.lower() == TG_IXEXPLORER
) or (
- self.pktgen_type == PKTGEN_IXIA_NETWORK
- and section.lower() == PKTGEN_IXIA_NETWORK
+ self.tg_type == TG_IXNETWORK
+ and section.lower() == TG_IXNETWORK
):
- # covert file configuration to dts pktgen cfg
- self.load_pktgen_ixia_config(section)
+ # covert file configuration to dts tg cfg
+ self.load_tg_ixia_config(section)
- return self.pktgen_cfg
+ return self.tg_cfg
class AppNameConf(UserConf):
@@ -441,7 +439,7 @@ class AppNameConf(UserConf):
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Load DTS configuration files")
parser.add_argument("-p", "--portconf", default=PORTCONF)
- parser.add_argument("-c", "--crbconf", default=CRBCONF)
+ parser.add_argument("-t", "--topoconf", default=TOPOCONF)
parser.add_argument("-v", "--virtconf", default=VIRTCONF)
parser.add_argument("-i", "--ixiaconf", default=IXIACONF)
args = parser.parse_args()
@@ -463,7 +461,7 @@ if __name__ == "__main__":
# example for port configuration file
portconf = PortConf(PORTCONF)
- portconf.load_ports_config("DUT IP")
+ portconf.load_ports_config("SUT IP")
print(portconf.get_ports_config())
portconf.check_port_available("86:00.0")
@@ -472,9 +470,9 @@ if __name__ == "__main__":
virtconf.load_virt_config("LIBVIRT")
print(virtconf.get_virt_config())
- # example for crbs configuration file
- crbsconf = CrbsConf(CRBCONF)
- print(crbsconf.load_crbs_config())
+ # example for nodes configuration file
+ topoconf = TopologyConf(TOPOCONF)
+ print(topoconf.load_topo_config())
# example for suite configure file
suiteconf = SuiteConf("suite_sample")
diff --git a/framework/crbs.py b/framework/crbs.py
deleted file mode 100644
index 5d5f0e1e..00000000
--- a/framework/crbs.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Static configuration data for any CRBs that can be used.
-"""
-
-crbs_desc = {
- "CrownPassCRB1": """
- - Intel Grizzly Pass Server Board populated with:
-
- - 2x Intel Xeon CPU E5-2680 @ 2.7GHz with 64 KB L1 D-cache (per
- physical core), 256 KB L2 D-cache (per physical core) and 25 MB of
- L3 D-cache (shared across physical cores).
- - 8x DDR3 DIMMs @ 1333 MHz of 4GB each. Each of the 4 memory channels of each
- CPU is populated with 2 DIMMs.
- - 4x Intel 82599 NICs (2x 10GbE full duplex optical ports per NIC)
- plugged into the available PCIe Gen2 8-lane slots. To avoid PCIe bandwidth
- bottlenecks at high packet rates, a single optical port from each NIC is
- connected to the traffic generator.
-
- - BIOS version R02.01.0002 with the following settings:
-
- - Intel Turbo Boost Technology [Disabled]
- - Enhanced Intel SpeedStep Technology (EIST) [Disabled]
- - Intel Hyper-Threading Technology [Enabled]
- - Direct Cache Access [Disabled]
-
- - Execute DisableBit [Enabled]
- - MLC Streamer [Enabled]
- - MLC Spatial Prefetcher [Disabled]
- - DCU Data Prefetcher [Disabled]
- - DCU Instruction Prefetcher [Enabled]
-
- - Software configuration:
-
- - Linux operating system: Fedora 20 64-bit
- - Linux kernel version: 3.6.10
- """
-}
diff --git a/framework/debugger.py b/framework/debugger.py
index 20f4ff2b..d362f355 100644
--- a/framework/debugger.py
+++ b/framework/debugger.py
@@ -26,7 +26,7 @@ def help_command():
console.push("print(' - help(): help messages')")
console.push("print(' - list(): list all connections')")
console.push("print(' - connect(): bind to specified connection')")
- console.push("print(' - : connect(\"dut\")')")
+ console.push("print(' - : connect(\"SUT\")')")
console.push("print(' - quit(): quit debug module')")
console.push("print(' - exit(): exit processing procedure')")
console.push("print(' - debug(): call python debug module for further debug')")
@@ -74,13 +74,13 @@ def rerun_command():
new_module = imp.reload(AliveModule)
# save arguments required to initialize suite
- duts = AliveSuite.__dict__["duts"]
- tester = AliveSuite.__dict__["tester"]
+ sut_nodes = AliveSuite.__dict__["sut_nodes"]
+ tg_node = AliveSuite.__dict__["tg_node"]
target = AliveSuite.__dict__["target"]
suite = AliveSuite.__dict__["suite_name"]
for test_classname, test_class in get_subclasses(new_module, TestCase):
- suite_obj = test_class(duts, tester, target, suite)
+ suite_obj = test_class(sut_nodes, tg_node, target, suite)
# copy all element from previous suite to reloaded suite
copy_instance_attr(AliveSuite, suite_obj)
diff --git a/framework/dts.py b/framework/dts.py
index a894c461..54570090 100644
--- a/framework/dts.py
+++ b/framework/dts.py
@@ -23,17 +23,17 @@ import framework.settings as settings # dts settings
from framework.asan_test import ASanTestProcess
from .checkCase import CheckCase
-from .config import CrbsConf
-from .dut import Dut
+from .config import TopologyConf
from .excel_reporter import ExcelReporter
from .exception import ConfigParseException, TimeoutException, VerifyFailure
from .json_reporter import JSONReporter
from .logger import getLogger
from .serializer import Serializer
from .stats_reporter import StatsReporter
+from .sut_node import SutNode
from .test_case import TestCase
from .test_result import Result
-from .tester import Tester
+from .tg_node import TrafficGeneratorNode
from .utils import (
check_dts_python_version,
copy_instance_attr,
@@ -131,7 +131,7 @@ def dts_parse_config(config, section):
"""
Parse execution file configuration.
"""
- duts = [dut_.strip() for dut_ in config.get(section, "crbs").split(",")]
+ sut_nodes = [sut_.strip() for sut_ in config.get(section, "sut").split(",")]
targets = [target.strip() for target in config.get(section, "targets").split(",")]
test_suites = [
suite.strip() for suite in config.get(section, "test_suites").split(",")
@@ -147,7 +147,7 @@ def dts_parse_config(config, section):
if suite == "":
test_suites.remove(suite)
- return duts, targets, test_suites
+ return sut_nodes, targets, test_suites
def dts_parse_commands(commands):
@@ -159,7 +159,7 @@ def dts_parse_commands(commands):
if commands is None:
return dts_commands
- args_format = {"shell": 0, "crb": 1, "stage": 2, "check": 3, "max_num": 4}
+ args_format = {"shell": 0, "node": 1, "stage": 2, "check": 3, "max_num": 4}
cmd_fmt = r"\[(.*)\]"
for command in commands:
@@ -180,10 +180,10 @@ def dts_parse_commands(commands):
dts_command["command"] = shell_cmd[:-1]
else:
dts_command["command"] = args[0]
- if args[1] == "tester":
- dts_command["host"] = "tester"
+ if args[1] == "tg":
+ dts_command["host"] = "tg"
else:
- dts_command["host"] = "dut"
+ dts_command["host"] = "sut"
if args[2] == "post-init":
dts_command["stage"] = "post-init"
else:
@@ -198,24 +198,24 @@ def dts_parse_commands(commands):
return dts_commands
-def dts_run_commands(crb, dts_commands):
+def dts_run_commands(node, dts_commands):
"""
Run dts input commands
"""
for dts_command in dts_commands:
command = dts_command["command"]
- if dts_command["host"] in crb.NAME:
- if crb.stage == dts_command["stage"]:
- ret = crb.send_expect(command, expected="# ", verify=True)
+ if dts_command["host"] in node.NAME:
+ if node.stage == dts_command["stage"]:
+ ret = node.send_expect(command, expected="# ", verify=True)
if type(ret) is int:
log_handler.error("[%s] return failure" % command)
if dts_command["verify"] is True:
raise VerifyFailure("Command execution failed")
-def get_project_obj(project_name, super_class, crbInst, serializer, dut_id):
+def get_project_obj(project_name, super_class, nodeInst, serializer, sut_id):
"""
- Load project module and return crb instance.
+ Load project module and return node instance.
"""
project_obj = None
PROJECT_MODULE_PREFIX = "project_"
@@ -228,163 +228,163 @@ def get_project_obj(project_name, super_class, crbInst, serializer, dut_id):
for project_subclassname, project_subclass in get_subclasses(
project_module, super_class
):
- project_obj = project_subclass(crbInst, serializer, dut_id)
+ project_obj = project_subclass(nodeInst, serializer, sut_id)
if project_obj is None:
- project_obj = super_class(crbInst, serializer, dut_id)
+ project_obj = super_class(nodeInst, serializer, sut_id)
except Exception as e:
log_handler.info("LOAD PROJECT MODULE INFO: " + str(e))
- project_obj = super_class(crbInst, serializer, dut_id)
+ project_obj = super_class(nodeInst, serializer, sut_id)
return project_obj
-def dts_log_testsuite(duts, tester, suite_obj, log_handler, test_classname):
+def dts_log_testsuite(sut_nodes, tg_node, suite_obj, log_handler, test_classname):
"""
Change to SUITE self logger handler.
"""
log_handler.config_suite(test_classname, "dts")
- tester.logger.config_suite(test_classname, "tester")
- if hasattr(tester, "logger_alt"):
- tester.logger_alt.config_suite(test_classname, "tester")
- if hasattr(tester, "logger_scapy"):
- tester.logger_scapy.config_suite(test_classname, "tester")
+ tg_node.logger.config_suite(test_classname, "tg")
+ if hasattr(tg_node, "logger_alt"):
+ tg_node.logger_alt.config_suite(test_classname, "tg")
+ if hasattr(tg_node, "logger_scapy"):
+ tg_node.logger_scapy.config_suite(test_classname, "tg")
- for dutobj in duts:
- dutobj.logger.config_suite(test_classname, "dut")
- dutobj.test_classname = test_classname
+ for sut_node in sut_nodes:
+ sut_node.logger.config_suite(test_classname, "sut")
+ sut_node.test_classname = test_classname
try:
- if tester.it_uses_external_generator():
+ if tg_node.it_uses_external_generator():
if (
- tester.is_pktgen
- and hasattr(tester, "pktgen")
- and getattr(tester, "pktgen")
+ tg_node.uses_perf_tg
+ and hasattr(tg_node, "perf_tg")
+ and getattr(tg_node, "perf_tg")
):
- tester.pktgen.logger.config_suite(test_classname, "pktgen")
+ tg_node.perf_tg.logger.config_suite(test_classname, "perf_tg")
except Exception as ex:
pass
-def dts_log_execution(duts, tester, log_handler):
+def dts_log_execution(sut_nodes, tg_node, log_handler):
"""
Change to DTS default logger handler.
"""
log_handler.config_execution("dts")
- tester.logger.config_execution("tester")
+ tg_node.logger.config_execution("tg")
- for dutobj in duts:
- dutobj.logger.config_execution(
- "dut" + settings.LOG_NAME_SEP + "%s" % dutobj.crb["My IP"]
+ for sut_node in sut_nodes:
+ sut_node.logger.config_execution(
+ "sut" + settings.LOG_NAME_SEP + "%s" % sut_node.node["My IP"]
)
try:
- if tester.it_uses_external_generator():
+ if tg_node.it_uses_external_generator():
if (
- tester.is_pktgen
- and hasattr(tester, "pktgen")
- and getattr(tester, "pktgen")
+ tg_node.uses_perf_tg
+ and hasattr(tg_node, "perf_tg")
+ and getattr(tg_node, "perf_tg")
):
- tester.pktgen.logger.config_execution("pktgen")
+ tg_node.perf_tg.logger.config_execution("perf_tg")
except Exception as ex:
pass
-def dts_crbs_init(
- crbInsts, skip_setup, read_cache, project, base_dir, serializer, virttype
+def dts_nodes_init(
+ nodeInsts, skip_setup, read_cache, project, base_dir, serializer, virttype
):
"""
- Create dts dut/tester instance and initialize them.
+ Create dts SUT/TG instance and initialize them.
"""
- duts = []
+ sut_nodes = []
serializer.set_serialized_filename(
- settings.FOLDERS["Output"] + "/.%s.cache" % crbInsts[0]["IP"]
+ settings.FOLDERS["Output"] + "/.%s.cache" % nodeInsts[0]["IP"]
)
serializer.load_from_file()
- testInst = copy.copy(crbInsts[0])
- testInst["My IP"] = crbInsts[0]["tester IP"]
- tester = get_project_obj(project, Tester, testInst, serializer, dut_id=0)
+ testInst = copy.copy(nodeInsts[0])
+ testInst["My IP"] = nodeInsts[0]["tg IP"]
+ tg_node = get_project_obj(project, TrafficGeneratorNode, testInst, serializer, sut_id=0)
- dut_id = 0
- for crbInst in crbInsts:
- dutInst = copy.copy(crbInst)
- dutInst["My IP"] = crbInst["IP"]
- dutobj = get_project_obj(project, Dut, dutInst, serializer, dut_id=dut_id)
- duts.append(dutobj)
- dut_id += 1
+ sut_id = 0
+ for nodeInst in nodeInsts:
+ sutInst = copy.copy(nodeInst)
+ sutInst["My IP"] = nodeInst["IP"]
+ sut_node = get_project_obj(project, SutNode, sutInst, serializer, sut_id=sut_id)
+ sut_nodes.append(sut_node)
+ sut_id += 1
- dts_log_execution(duts, tester, log_handler)
+ dts_log_execution(sut_nodes, tg_node, log_handler)
- tester.duts = duts
+ tg_node.sut_nodes = sut_nodes
show_speedup_options_messages(read_cache, skip_setup)
- tester.set_speedup_options(read_cache, skip_setup)
+ tg_node.set_speedup_options(read_cache, skip_setup)
try:
- tester.init_ext_gen()
+ tg_node.init_ext_gen()
except Exception as e:
log_handler.error(str(e))
- tester.close()
- for dutobj in duts:
- dutobj.close()
+ tg_node.close()
+ for sut_node in sut_nodes:
+ sut_node.close()
raise e
nic = settings.load_global_setting(settings.HOST_NIC_SETTING)
- for dutobj in duts:
- dutobj.tester = tester
- dutobj.setup_virtenv(virttype)
- dutobj.set_speedup_options(read_cache, skip_setup)
- dutobj.set_directory(base_dir)
+ for sut_node in sut_nodes:
+ sut_node.tg_node = tg_node
+ sut_node.setup_virtenv(virttype)
+ sut_node.set_speedup_options(read_cache, skip_setup)
+ sut_node.set_directory(base_dir)
# save execution nic setting
- dutobj.set_nic_type(nic)
+ sut_node.set_nic_type(nic)
- return duts, tester
+ return sut_nodes, tg_node
-def dts_crbs_exit(duts, tester):
+def dts_nodes_exit(sut_nodes, tg_node):
"""
- Call dut and tester exit function after execution finished
+ Call SUT and TG exit function after execution finished
"""
- for dutobj in duts:
- dutobj.crb_exit()
+ for sut_node in sut_nodes:
+ sut_node.node_exit()
- tester.crb_exit()
+ tg_node.node_exit()
-def dts_run_prerequisties(duts, tester, pkgName, patch, dts_commands, serializer):
+def dts_run_prerequisties(sut_nodes, tg_node, pkgName, patch, dts_commands, serializer):
"""
Run dts prerequisties function.
"""
try:
- dts_run_commands(tester, dts_commands)
- tester.prerequisites()
- dts_run_commands(tester, dts_commands)
+ dts_run_commands(tg_node, dts_commands)
+ tg_node.prerequisites()
+ dts_run_commands(tg_node, dts_commands)
except Exception as ex:
log_handler.error(" PREREQ EXCEPTION " + traceback.format_exc())
log_handler.info("CACHE: Discarding cache.")
serializer.discard_cache()
- settings.report_error("TESTER_SETUP_ERR")
+ settings.report_error("TG_SETUP_ERR")
return False
try:
- for dutobj in duts:
- dts_run_commands(dutobj, dts_commands)
- dutobj.set_package(pkgName, patch)
- dutobj.prerequisites()
- dts_run_commands(dutobj, dts_commands)
+ for sut_node in sut_nodes:
+ dts_run_commands(sut_node, dts_commands)
+ sut_node.set_package(pkgName, patch)
+ sut_node.prerequisites()
+ dts_run_commands(sut_node, dts_commands)
serializer.save_to_file()
except Exception as ex:
log_handler.error(" PREREQ EXCEPTION " + traceback.format_exc())
- result.add_failed_dut(duts[0], str(ex))
+ result.add_failed_sut(sut_nodes[0], str(ex))
log_handler.info("CACHE: Discarding cache.")
serializer.discard_cache()
- settings.report_error("DUT_SETUP_ERR")
+ settings.report_error("SUT_SETUP_ERR")
return False
else:
- result.remove_failed_dut(duts[0])
+ result.remove_failed_sut(sut_nodes[0])
-def dts_run_target(duts, tester, targets, test_suites, subtitle):
+def dts_run_target(sut_nodes, tg_node, targets, test_suites, subtitle):
"""
Run each target in execution targets.
"""
@@ -395,35 +395,35 @@ def dts_run_target(duts, tester, targets, test_suites, subtitle):
try:
drivername = settings.load_global_setting(settings.HOST_DRIVER_SETTING)
if drivername == "":
- for dutobj in duts:
- dutobj.set_target(target, bind_dev=False)
+ for sut_node in sut_nodes:
+ sut_node.set_target(target, bind_dev=False)
else:
- for dutobj in duts:
- dutobj.set_target(target)
+ for sut_node in sut_nodes:
+ sut_node.set_target(target)
except AssertionError as ex:
log_handler.error(" TARGET ERROR: " + str(ex))
settings.report_error("DPDK_BUILD_ERR")
- result.add_failed_target(result.dut, target, str(ex))
+ result.add_failed_target(result.sut, target, str(ex))
continue
except Exception as ex:
settings.report_error("GENERIC_ERR")
log_handler.error(" !!! DEBUG IT: " + traceback.format_exc())
- result.add_failed_target(result.dut, target, str(ex))
+ result.add_failed_target(result.sut, target, str(ex))
continue
else:
- result.remove_failed_target(result.dut, target)
+ result.remove_failed_target(result.sut, target)
- dts_run_suite(duts, tester, test_suites, target, subtitle)
+ dts_run_suite(sut_nodes, tg_node, test_suites, target, subtitle)
- tester.restore_interfaces()
+ tg_node.restore_interfaces()
- for dutobj in duts:
- dutobj.stop_ports()
- dutobj.restore_interfaces()
- dutobj.restore_modules()
+ for sut_node in sut_nodes:
+ sut_node.stop_ports()
+ sut_node.restore_interfaces()
+ sut_node.restore_modules()
-def dts_run_suite(duts, tester, test_suites, target, subtitle):
+def dts_run_suite(sut_nodes, tg_node, test_suites, target, subtitle):
"""
Run each suite in test suite list.
"""
@@ -442,7 +442,7 @@ def dts_run_suite(duts, tester, test_suites, target, subtitle):
)
for test_classname, test_class in get_subclasses(suite_module, TestCase):
- suite_obj = test_class(duts, tester, target, suite_name)
+ suite_obj = test_class(sut_nodes, tg_node, target, suite_name)
suite_obj.init_log()
suite_obj.set_requested_cases(requested_tests)
suite_obj.set_requested_cases(append_requested_case_list)
@@ -450,7 +450,7 @@ def dts_run_suite(duts, tester, test_suites, target, subtitle):
suite_obj.set_subtitle(subtitle)
result.nic = suite_obj.nic
- dts_log_testsuite(duts, tester, suite_obj, log_handler, test_classname)
+ dts_log_testsuite(sut_nodes, tg_node, suite_obj, log_handler, test_classname)
log_handler.info("\nTEST SUITE : " + test_classname)
log_handler.info("NIC : " + result.nic)
@@ -462,7 +462,7 @@ def dts_run_suite(duts, tester, test_suites, target, subtitle):
result.copy_suite(suite_obj.get_result())
log_handler.info("\nTEST SUITE ENDED: " + test_classname)
- dts_log_execution(duts, tester, log_handler)
+ dts_log_execution(sut_nodes, tg_node, log_handler)
except VerifyFailure:
settings.report_error("SUITE_EXECUTE_ERR")
log_handler.error(" !!! DEBUG IT: " + traceback.format_exc())
@@ -590,67 +590,67 @@ def run_all(
stats_report = StatsReporter(output_dir + "/statistics.txt")
result = Result()
- crbs_conf = CrbsConf()
- crbs = crbs_conf.load_crbs_config()
+ nodes_conf = TopologyConf()
+ nodes = nodes_conf.load_topo_config()
# for all Execution sections
for section in config.sections():
- crbInsts = list()
+ nodeInsts = list()
dts_parse_param(config, section, log_handler)
# verify if the delimiter is good if the lists are vertical
- duts, targets, test_suites = dts_parse_config(config, section)
- for dut in duts:
- log_handler.info("\nDUT " + dut)
-
- # look up in crbs - to find the matching IP
- for dut in duts:
- for crb in crbs:
- if crb["section"] == dut:
- crbInsts.append(crb)
+ sut_nodes, targets, test_suites = dts_parse_config(config, section)
+ for sut in sut_nodes:
+ log_handler.info("\nSUT " + sut)
+
+ # look up in nodes - to find the matching IP
+ for sut in sut_nodes:
+ for node in nodes:
+ if node["section"] == sut:
+ nodeInsts.append(node)
break
- # only run on the dut in known crbs
- if len(crbInsts) == 0:
- log_handler.error(" SKIP UNKNOWN CRB")
+ # only run on the SUT in known nodes
+ if len(nodeInsts) == 0:
+ log_handler.error(" SKIP UNKNOWN NODE")
continue
- result.dut = duts[0]
+ result.sut = sut_nodes[0]
# init global lock
- create_parallel_locks(len(duts))
+ create_parallel_locks(len(sut_nodes))
- # init dut, tester crb
- duts, tester = dts_crbs_init(
- crbInsts, skip_setup, read_cache, project, base_dir, serializer, virttype
+ # init SUT, TG node
+ sut_nodes, tg_node = dts_nodes_init(
+ nodeInsts, skip_setup, read_cache, project, base_dir, serializer, virttype
)
- tester.set_re_run(re_run)
+ tg_node.set_re_run(re_run)
# register exit action
- atexit.register(quit_execution, duts, tester)
+ atexit.register(quit_execution, sut_nodes, tg_node)
- check_case_inst.check_dut(duts[0])
+ check_case_inst.check_sut(sut_nodes[0])
- # Run DUT prerequisites
+ # Run SUT prerequisites
if (
dts_run_prerequisties(
- duts, tester, pkgName, patch, dts_commands, serializer
+ sut_nodes, tg_node, pkgName, patch, dts_commands, serializer
)
is False
):
- dts_crbs_exit(duts, tester)
+ dts_nodes_exit(sut_nodes, tg_node)
continue
- result.kdriver = duts[0].nic.default_driver + "-" + duts[0].nic.driver_version
- result.firmware = duts[0].nic.firmware
+ result.kdriver = sut_nodes[0].nic.default_driver + "-" + sut_nodes[0].nic.driver_version
+ result.firmware = sut_nodes[0].nic.firmware
result.package = (
- duts[0].nic.pkg["type"] + " " + duts[0].nic.pkg["version"]
- if duts[0].nic.pkg
+ sut_nodes[0].nic.pkg["type"] + " " + sut_nodes[0].nic.pkg["version"]
+ if sut_nodes[0].nic.pkg
else None
)
result.driver = settings.load_global_setting(settings.HOST_DRIVER_SETTING)
- result.dpdk_version = duts[0].dpdk_version
- dts_run_target(duts, tester, targets, test_suites, subtitle)
+ result.dpdk_version = sut_nodes[0].dpdk_version
+ dts_run_target(sut_nodes, tg_node, targets, test_suites, subtitle)
- dts_crbs_exit(duts, tester)
+ dts_nodes_exit(sut_nodes, tg_node)
def show_speedup_options_messages(read_cache, skip_setup):
@@ -674,21 +674,21 @@ def save_all_results():
stats_report.save(result)
-def quit_execution(duts, tester):
+def quit_execution(sut_nodes, tg_node):
"""
- Close session to DUT and tester before quit.
+ Close session to SUT and TG before quit.
Return exit status when failure occurred.
"""
# close all nics
- for dutobj in duts:
- if getattr(dutobj, "ports_info", None) and dutobj.ports_info:
- for port_info in dutobj.ports_info:
+ for sut_node in sut_nodes:
+ if getattr(sut_node, "ports_info", None) and sut_node.ports_info:
+ for port_info in sut_node.ports_info:
netdev = port_info["port"]
netdev.close()
# close all session
- dutobj.close()
- if tester is not None:
- tester.close()
+ sut_node.close()
+ if tg_node is not None:
+ tg_node.close()
log_handler.info("DTS ended")
# return value
diff --git a/framework/excel_reporter.py b/framework/excel_reporter.py
index a29dd030..735917b9 100644
--- a/framework/excel_reporter.py
+++ b/framework/excel_reporter.py
@@ -8,7 +8,7 @@ Excel spreadsheet generator
Example:
excel_report = ExcelReporter('../output/test_results.xls')
result = Result()
- result.dut = dutIP
+ result.sut = sutIP
result.target = target
result.nic = nic
result.test_suite = test_suite
@@ -18,7 +18,7 @@ Example:
Result:
execl will be formatted as
- DUT Target NIC Test suite Test case Results
+ SUT Target NIC Test suite Test case Results
10.239.128.117 x86_64-native-linuxapp-gcc IXGBE_10G-82599_SFP
SUITE CASE PASSED
@@ -47,7 +47,7 @@ class ExcelReporter(object):
self.sheet = self.workbook.add_sheet("Test Results", cell_overwrite_ok=True)
def __add_header(self):
- self.sheet.write(0, 0, "DUT", self.header_style)
+ self.sheet.write(0, 0, "SUT", self.header_style)
self.sheet.write(0, 1, "DPDK version", self.header_style)
self.sheet.write(0, 2, "Target", self.header_style)
self.sheet.write(0, 3, "NIC", self.header_style)
@@ -119,22 +119,22 @@ class ExcelReporter(object):
self.title_style = xlwt.XFStyle()
self.title_style.font = title_font
- def __get_case_result(self, dut, target, suite, case):
- case_list = self.result.all_test_cases(dut, target, suite)
+ def __get_case_result(self, sut, target, suite, case):
+ case_list = self.result.all_test_cases(sut, target, suite)
if case_list.count(case) > 1:
tmp_result = []
for case_name in case_list:
if case == case_name:
- test_result = self.result.result_for(dut, target, suite, case)
+ test_result = self.result.result_for(sut, target, suite, case)
if "PASSED" in test_result:
return ["PASSED", ""]
else:
tmp_result.append(test_result)
return tmp_result[-1]
else:
- return self.result.result_for(dut, target, suite, case)
+ return self.result.result_for(sut, target, suite, case)
- def __write_result(self, dut, target, suite, case, test_result):
+ def __write_result(self, sut, target, suite, case, test_result):
if test_result is not None and len(test_result) > 0:
result = test_result[0]
if test_result[1] != "":
@@ -151,32 +151,32 @@ class ExcelReporter(object):
self.failed_style,
)
- def __write_cases(self, dut, target, suite):
- for case in set(self.result.all_test_cases(dut, target, suite)):
- result = self.__get_case_result(dut, target, suite, case)
+ def __write_cases(self, sut, target, suite):
+ for case in set(self.result.all_test_cases(sut, target, suite)):
+ result = self.__get_case_result(sut, target, suite, case)
self.col += 1
if case[:5] == "test_":
self.sheet.write(self.row, self.col, case[5:])
else:
self.sheet.write(self.row, self.col, case)
- self.__write_result(dut, target, suite, case, result)
+ self.__write_result(sut, target, suite, case, result)
self.row += 1
self.col -= 1
- def __write_suites(self, dut, target):
- for suite in self.result.all_test_suites(dut, target):
+ def __write_suites(self, sut, target):
+ for suite in self.result.all_test_suites(sut, target):
self.row += 1
self.col += 1
self.sheet.write(self.row, self.col, suite)
- self.__write_cases(dut, target, suite)
+ self.__write_cases(sut, target, suite)
self.col -= 1
- def __write_nic(self, dut, target):
- nic = self.result.current_nic(dut, target)
- driver = self.result.current_driver(dut)
- kdriver = self.result.current_kdriver(dut)
- firmware = self.result.current_firmware_version(dut)
- pkg = self.result.current_package_version(dut)
+ def __write_nic(self, sut, target):
+ nic = self.result.current_nic(sut, target)
+ driver = self.result.current_driver(sut)
+ kdriver = self.result.current_kdriver(sut)
+ firmware = self.result.current_firmware_version(sut)
+ pkg = self.result.current_package_version(sut)
self.col += 1
self.sheet.col(self.col).width = 32 * 256 # 32 characters
self.sheet.write(self.row, self.col, nic, self.title_style)
@@ -187,11 +187,11 @@ class ExcelReporter(object):
self.sheet.write(self.row + 4, self.col, "pkg: " + pkg)
self.row = self.row + 1
self.row = self.row + 3
- self.__write_suites(dut, target)
+ self.__write_suites(sut, target)
self.col -= 1
- def __write_failed_target(self, dut, target):
- msg = "TARGET ERROR '%s'" % self.result.target_failed_msg(dut, target)
+ def __write_failed_target(self, sut, target):
+ msg = "TARGET ERROR '%s'" % self.result.target_failed_msg(sut, target)
self.sheet.write(
self.row,
self.col + 4,
@@ -202,19 +202,19 @@ class ExcelReporter(object):
)
self.row += 1
- def __write_targets(self, dut):
- for target in self.result.all_targets(dut):
+ def __write_targets(self, sut):
+ for target in self.result.all_targets(sut):
self.col += 1
self.sheet.write(self.row, self.col, target, self.title_style)
- if self.result.is_target_failed(dut, target):
- self.__write_failed_target(dut, target)
+ if self.result.is_target_failed(sut, target):
+ self.__write_failed_target(sut, target)
else:
- self.__write_nic(dut, target)
+ self.__write_nic(sut, target)
self.row += 1
self.col -= 1
- def __write_failed_dut(self, dut):
- msg = "PREREQ FAILED '%s'" % self.result.dut_failed_msg(dut)
+ def __write_failed_sut(self, sut):
+ msg = "PREREQ FAILED '%s'" % self.result.sut_failed_msg(sut)
self.sheet.write(
self.row,
self.col + 5,
@@ -226,19 +226,19 @@ class ExcelReporter(object):
self.row += 1
def __parse_result(self):
- for dut in self.result.all_duts():
- self.sheet.write(self.row, self.col, dut, self.title_style)
- if self.result.is_dut_failed(dut):
- self.__write_failed_dut(dut)
+ for sut in self.result.all_suts():
+ self.sheet.write(self.row, self.col, sut, self.title_style)
+ if self.result.is_sut_failed(sut):
+ self.__write_failed_sut(sut)
else:
self.col = self.col + 1
self.sheet.write(
self.row,
self.col,
- self.result.current_dpdk_version(dut),
+ self.result.current_dpdk_version(sut),
self.title_style,
)
- self.__write_targets(dut)
+ self.__write_targets(sut)
self.row += 1
def save(self, result):
diff --git a/framework/exception.py b/framework/exception.py
index fb0fa72e..6b8faf65 100644
--- a/framework/exception.py
+++ b/framework/exception.py
@@ -127,13 +127,13 @@ class VirtConfigParamException(Exception):
return "Faile to execute param [%s]" % (self.param)
-class VirtDutConnectException(Exception):
+class VirtSutConnectException(Exception):
pass
-class VirtDutInitException(Exception):
- def __init__(self, vm_dut):
- self.vm_dut = vm_dut
+class VirtSutInitException(Exception):
+ def __init__(self, vm_sut):
+ self.vm_sut = vm_sut
class VirtDeviceCreateException(Exception):
diff --git a/framework/ixia_network/ixnet.py b/framework/ixia_network/ixnet.py
index 7c562423..1b135a81 100644
--- a/framework/ixia_network/ixnet.py
+++ b/framework/ixia_network/ixnet.py
@@ -62,7 +62,7 @@ class IxnetTrafficGenerator(object):
return response
def __get_ports(self):
- """Return available tg vports list"""
+ """Return available TG vports list"""
return self.tg_vports
def disable_port_misdirected(self):
diff --git a/framework/json_reporter.py b/framework/json_reporter.py
index 2355bdac..37379b6f 100644
--- a/framework/json_reporter.py
+++ b/framework/json_reporter.py
@@ -9,10 +9,10 @@ class JSONReporter(object):
def __init__(self, filename):
self.filename = filename
- def __scan_cases(self, result, dut, target, suite):
+ def __scan_cases(self, result, sut, target, suite):
case_results = {}
- for case in result.all_test_cases(dut, target, suite):
- test_result = result.result_for(dut, target, suite, case)
+ for case in result.all_test_cases(sut, target, suite):
+ test_result = result.result_for(sut, target, suite, case)
case_name = "{}/{}".format(suite, case)
case_results[case_name] = test_result
if "PASSED" in test_result:
@@ -25,34 +25,34 @@ class JSONReporter(object):
case_results[case_name] = "blocked"
return case_results
- def __scan_target(self, result, dut, target):
- if result.is_target_failed(dut, target):
+ def __scan_target(self, result, sut, target):
+ if result.is_target_failed(sut, target):
return "fail"
case_results = {}
- for suite in result.all_test_suites(dut, target):
- case_results.update(self.__scan_cases(result, dut, target, suite))
+ for suite in result.all_test_suites(sut, target):
+ case_results.update(self.__scan_cases(result, sut, target, suite))
return case_results
- def __scan_dut(self, result, dut):
- if result.is_dut_failed(dut):
+ def __scan_sut(self, result, sut):
+ if result.is_sut_failed(sut):
return "fail"
target_map = {}
- target_map["dpdk_version"] = result.current_dpdk_version(dut)
+ target_map["dpdk_version"] = result.current_dpdk_version(sut)
target_map["nic"] = {}
- for target in result.all_targets(dut):
- target_map["nic"]["name"] = result.current_nic(dut, target)
- target_map[target] = self.__scan_target(result, dut, target)
- target_map["nic"]["kdriver"] = result.current_kdriver(dut)
- target_map["nic"]["driver"] = result.current_driver(dut)
- target_map["nic"]["firmware"] = result.current_firmware_version(dut)
- if result.current_package_version(dut) is not None:
- target_map["nic"]["pkg"] = result.current_package_version(dut)
+ for target in result.all_targets(sut):
+ target_map["nic"]["name"] = result.current_nic(sut, target)
+ target_map[target] = self.__scan_target(result, sut, target)
+ target_map["nic"]["kdriver"] = result.current_kdriver(sut)
+ target_map["nic"]["driver"] = result.current_driver(sut)
+ target_map["nic"]["firmware"] = result.current_firmware_version(sut)
+ if result.current_package_version(sut) is not None:
+ target_map["nic"]["pkg"] = result.current_package_version(sut)
return target_map
def save(self, result):
result_map = {}
- for dut in result.all_duts():
- result_map[dut] = self.__scan_dut(result, dut)
+ for sut in result.all_suts():
+ result_map[sut] = self.__scan_sut(result, sut)
with open(self.filename, "w") as outfile:
json.dump(
result_map, outfile, indent=4, separators=(",", ": "), sort_keys=True
diff --git a/framework/logger.py b/framework/logger.py
index b8576342..34d364f4 100644
--- a/framework/logger.py
+++ b/framework/logger.py
@@ -17,42 +17,42 @@ will saved into different log files.
"""
verbose = False
-logging.DTS_DUT_CMD = logging.INFO + 1
-logging.DTS_DUT_OUTPUT = logging.DEBUG + 1
-logging.DTS_DUT_RESULT = logging.WARNING + 1
+logging.DTS_SUT_CMD = logging.INFO + 1
+logging.DTS_SUT_OUTPUT = logging.DEBUG + 1
+logging.DTS_SUT_RESULT = logging.WARNING + 1
-logging.DTS_TESTER_CMD = logging.INFO + 2
-logging.DTS_TESTER_OUTPUT = logging.DEBUG + 2
-logging.DTS_TESTER_RESULT = logging.WARNING + 2
+logging.DTS_TG_CMD = logging.INFO + 2
+logging.DTS_TG_OUTPUT = logging.DEBUG + 2
+logging.DTS_TG_RESULT = logging.WARNING + 2
-logging.SUITE_DUT_CMD = logging.INFO + 3
-logging.SUITE_DUT_OUTPUT = logging.DEBUG + 3
+logging.SUITE_SUT_CMD = logging.INFO + 3
+logging.SUITE_SUT_OUTPUT = logging.DEBUG + 3
-logging.SUITE_TESTER_CMD = logging.INFO + 4
-logging.SUITE_TESTER_OUTPUT = logging.DEBUG + 4
+logging.SUITE_TG_CMD = logging.INFO + 4
+logging.SUITE_TG_OUTPUT = logging.DEBUG + 4
-logging.DTS_VIRTDUT_CMD = logging.INFO + 6
-logging.DTS_VIRTDUT_OUTPUT = logging.DEBUG + 6
+logging.DTS_VIRTSUT_CMD = logging.INFO + 6
+logging.DTS_VIRTSUT_OUTPUT = logging.DEBUG + 6
logging.DTS_PKTGEN_CMD = logging.INFO + 7
logging.DTS_PKTGEN_OUTPUT = logging.DEBUG + 7
-logging.addLevelName(logging.DTS_DUT_CMD, "DTS_DUT_CMD")
-logging.addLevelName(logging.DTS_DUT_OUTPUT, "DTS_DUT_OUTPUT")
-logging.addLevelName(logging.DTS_DUT_RESULT, "DTS_DUT_RESULT")
+logging.addLevelName(logging.DTS_SUT_CMD, "DTS_SUT_CMD")
+logging.addLevelName(logging.DTS_SUT_OUTPUT, "DTS_SUT_OUTPUT")
+logging.addLevelName(logging.DTS_SUT_RESULT, "DTS_SUT_RESULT")
-logging.addLevelName(logging.DTS_TESTER_CMD, "DTS_TESTER_CMD")
-logging.addLevelName(logging.DTS_TESTER_OUTPUT, "DTS_TESTER_OUTPUT")
-logging.addLevelName(logging.DTS_TESTER_RESULT, "DTS_TESTER_RESULT")
+logging.addLevelName(logging.DTS_TG_CMD, "DTS_TG_CMD")
+logging.addLevelName(logging.DTS_TG_OUTPUT, "DTS_TG_OUTPUT")
+logging.addLevelName(logging.DTS_TG_RESULT, "DTS_TG_RESULT")
-logging.addLevelName(logging.DTS_VIRTDUT_CMD, "VIRTDUT_CMD")
-logging.addLevelName(logging.DTS_VIRTDUT_OUTPUT, "VIRTDUT_OUTPUT")
+logging.addLevelName(logging.DTS_VIRTSUT_CMD, "VIRTSUT_CMD")
+logging.addLevelName(logging.DTS_VIRTSUT_OUTPUT, "VIRTSUT_OUTPUT")
-logging.addLevelName(logging.SUITE_DUT_CMD, "SUITE_DUT_CMD")
-logging.addLevelName(logging.SUITE_DUT_OUTPUT, "SUITE_DUT_OUTPUT")
+logging.addLevelName(logging.SUITE_SUT_CMD, "SUITE_SUT_CMD")
+logging.addLevelName(logging.SUITE_SUT_OUTPUT, "SUITE_SUT_OUTPUT")
-logging.addLevelName(logging.SUITE_TESTER_CMD, "SUITE_TESTER_CMD")
-logging.addLevelName(logging.SUITE_TESTER_OUTPUT, "SUITE_TESTER_OUTPUT")
+logging.addLevelName(logging.SUITE_TG_CMD, "SUITE_TG_CMD")
+logging.addLevelName(logging.SUITE_TG_OUTPUT, "SUITE_TG_OUTPUT")
logging.addLevelName(logging.DTS_PKTGEN_CMD, "DTS_PKTGEN_CMD")
logging.addLevelName(logging.DTS_PKTGEN_OUTPUT, "DTS_PKTGEN_OUTPUT")
@@ -77,41 +77,41 @@ class BaseLoggerAdapter(logging.LoggerAdapter):
Upper layer of original logging module.
"""
- def dts_dut_cmd(self, msg, *args, **kwargs):
- self.log(logging.DTS_DUT_CMD, msg, *args, **kwargs)
+ def dts_sut_cmd(self, msg, *args, **kwargs):
+ self.log(logging.DTS_SUT_CMD, msg, *args, **kwargs)
- def dts_dut_output(self, msg, *args, **kwargs):
- self.log(logging.DTS_DUT_OUTPUT, msg, *args, **kwargs)
+ def dts_sut_output(self, msg, *args, **kwargs):
+ self.log(logging.DTS_SUT_OUTPUT, msg, *args, **kwargs)
- def dts_dut_result(self, msg, *args, **kwargs):
- self.log(logging.DTS_DUT_RESULT, msg, *args, **kwargs)
+ def dts_sut_result(self, msg, *args, **kwargs):
+ self.log(logging.DTS_SUT_RESULT, msg, *args, **kwargs)
- def dts_tester_cmd(self, msg, *args, **kwargs):
- self.log(logging.DTS_TESTER_CMD, msg, *args, **kwargs)
+ def dts_tg_cmd(self, msg, *args, **kwargs):
+ self.log(logging.DTS_TG_CMD, msg, *args, **kwargs)
- def dts_tester_output(self, msg, *args, **kwargs):
- self.log(logging.DTS_TESTER_CMD, msg, *args, **kwargs)
+ def dts_tg_output(self, msg, *args, **kwargs):
+ self.log(logging.DTS_TG_CMD, msg, *args, **kwargs)
- def dts_tester_result(self, msg, *args, **kwargs):
- self.log(logging.DTS_TESTER_RESULT, msg, *args, **kwargs)
+ def dts_tg_result(self, msg, *args, **kwargs):
+ self.log(logging.DTS_TG_RESULT, msg, *args, **kwargs)
- def suite_dut_cmd(self, msg, *args, **kwargs):
- self.log(logging.SUITE_DUT_CMD, msg, *args, **kwargs)
+ def suite_sut_cmd(self, msg, *args, **kwargs):
+ self.log(logging.SUITE_SUT_CMD, msg, *args, **kwargs)
- def suite_dut_output(self, msg, *args, **kwargs):
- self.log(logging.SUITE_DUT_OUTPUT, msg, *args, **kwargs)
+ def suite_sut_output(self, msg, *args, **kwargs):
+ self.log(logging.SUITE_SUT_OUTPUT, msg, *args, **kwargs)
- def suite_tester_cmd(self, msg, *args, **kwargs):
- self.log(logging.SUITE_TESTER_CMD, msg, *args, **kwargs)
+ def suite_tg_cmd(self, msg, *args, **kwargs):
+ self.log(logging.SUITE_TG_CMD, msg, *args, **kwargs)
- def suite_tester_output(self, msg, *args, **kwargs):
- self.log(logging.SUITE_TESTER_OUTPUT, msg, *args, **kwargs)
+ def suite_tg_output(self, msg, *args, **kwargs):
+ self.log(logging.SUITE_TG_OUTPUT, msg, *args, **kwargs)
- def dts_virtdut_cmd(self, msg, *args, **kwargs):
- self.log(logging.DTS_VIRTDUT_CMD, msg, *args, **kwargs)
+ def dts_virtsut_cmd(self, msg, *args, **kwargs):
+ self.log(logging.DTS_VIRTSUT_CMD, msg, *args, **kwargs)
- def dts_virtdut_output(self, msg, *args, **kwargs):
- self.log(logging.DTS_VIRTDUT_OUTPUT, msg, *args, **kwargs)
+ def dts_virtsut_output(self, msg, *args, **kwargs):
+ self.log(logging.DTS_VIRTSUT_OUTPUT, msg, *args, **kwargs)
def dts_pktgen_cmd(self, msg, *args, **kwargs):
self.log(logging.DTS_PKTGEN_CMD, msg, *args, **kwargs)
@@ -127,22 +127,22 @@ class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: "", # SYSTEM
- logging.DTS_DUT_OUTPUT: "\033[00;37m", # WHITE
- logging.DTS_TESTER_OUTPUT: "\033[00;37m", # WHITE
- logging.SUITE_DUT_OUTPUT: "\033[00;37m", # WHITE
- logging.SUITE_TESTER_OUTPUT: "\033[00;37m", # WHITE
+ logging.DTS_SUT_OUTPUT: "\033[00;37m", # WHITE
+ logging.DTS_TG_OUTPUT: "\033[00;37m", # WHITE
+ logging.SUITE_SUT_OUTPUT: "\033[00;37m", # WHITE
+ logging.SUITE_TG_OUTPUT: "\033[00;37m", # WHITE
logging.INFO: "\033[00;36m", # CYAN
- logging.DTS_DUT_CMD: "", # SYSTEM
- logging.DTS_TESTER_CMD: "", # SYSTEM
- logging.SUITE_DUT_CMD: "", # SYSTEM
- logging.SUITE_TESTER_CMD: "", # SYSTEM
+ logging.DTS_SUT_CMD: "", # SYSTEM
+ logging.DTS_TG_CMD: "", # SYSTEM
+ logging.SUITE_SUT_CMD: "", # SYSTEM
+ logging.SUITE_TG_CMD: "", # SYSTEM
logging.DTS_PKTGEN_CMD: "", # SYSTEM
logging.DTS_PKTGEN_OUTPUT: "", # SYSTEM
- logging.DTS_VIRTDUT_CMD: "", # SYSTEM
- logging.DTS_VIRTDUT_OUTPUT: "", # SYSTEM
+ logging.DTS_VIRTSUT_CMD: "", # SYSTEM
+ logging.DTS_VIRTSUT_OUTPUT: "", # SYSTEM
logging.WARN: "\033[01;33m", # BOLD YELLOW
- logging.DTS_DUT_RESULT: "\033[01;34m", # BOLD BLUE
- logging.DTS_TESTER_RESULT: "\033[01;34m", # BOLD BLUE
+ logging.DTS_SUT_RESULT: "\033[01;34m", # BOLD BLUE
+ logging.DTS_TG_RESULT: "\033[01;34m", # BOLD BLUE
logging.ERROR: "\033[01;31m", # BOLD RED
logging.CRITICAL: "\033[01;31m", # BOLD RED
}
@@ -157,7 +157,7 @@ class DTSLOG(BaseLoggerAdapter):
DTS log class for framework and testsuite.
"""
- def __init__(self, logger, crb="suite"):
+ def __init__(self, logger, node="suite"):
global log_dir
filename = inspect.stack()[1][1][:-3]
@@ -170,15 +170,15 @@ class DTSLOG(BaseLoggerAdapter):
self.log_path = os.getcwd() + "/" + FOLDERS["Output"]
else:
self.log_path = (
- log_dir # log dir should contain tag/crb global value and mod in dts
+ log_dir # log dir should contain tag/node global value and mod in dts
)
self.dts_log = "dts.log"
self.logger = logger
self.logger.setLevel(logging.DEBUG)
- self.crb = crb
- super(DTSLOG, self).__init__(self.logger, dict(crb=self.crb))
+ self.node = node
+ super(DTSLOG, self).__init__(self.logger, dict(node=self.node))
self.fh = None
self.ch = None
@@ -262,7 +262,7 @@ class DTSLOG(BaseLoggerAdapter):
"""
self.fh.setLevel(lvl)
- def config_execution(self, crb):
+ def config_execution(self, node):
"""
Reconfigure stream&logfile level and reset info,debug,warn level.
"""
@@ -271,27 +271,27 @@ class DTSLOG(BaseLoggerAdapter):
ch = ColorHandler()
self.__log_handler(fh, ch)
- if crb.startswith("dut"):
- self.info_lvl = logging.DTS_DUT_CMD
- self.debug_lvl = logging.DTS_DUT_OUTPUT
- self.warn_lvl = logging.DTS_DUT_RESULT
- elif crb.startswith("tester"):
- self.info_lvl = logging.DTS_TESTER_CMD
- self.debug_lvl = logging.DTS_TESTER_OUTPUT
- self.warn_lvl = logging.DTS_TESTER_RESULT
- elif crb.startswith("pktgen"):
+ if node.startswith("sut"):
+ self.info_lvl = logging.DTS_SUT_CMD
+ self.debug_lvl = logging.DTS_SUT_OUTPUT
+ self.warn_lvl = logging.DTS_SUT_RESULT
+ elif node.startswith("tg"):
+ self.info_lvl = logging.DTS_TG_CMD
+ self.debug_lvl = logging.DTS_TG_OUTPUT
+ self.warn_lvl = logging.DTS_TG_RESULT
+ elif node.startswith("perf_tg"):
self.info_lvl = logging.DTS_PKTGEN_CMD
self.debug_lvl = logging.DTS_PKTGEN_OUTPUT
- elif crb.startswith("virtdut"):
- self.info_lvl = logging.DTS_VIRTDUT_CMD
- self.debug_lvl = logging.DTS_VIRTDUT_OUTPUT
+ elif node.startswith("virtsut"):
+ self.info_lvl = logging.DTS_VIRTSUT_CMD
+ self.debug_lvl = logging.DTS_VIRTSUT_OUTPUT
else:
self.error_lvl = logging.ERROR
self.warn_lvl = logging.WARNING
self.info_lvl = logging.INFO
self.debug_lvl = logging.DEBUG
- def config_suite(self, suitename, crb=None):
+ def config_suite(self, suitename, node=None):
"""
Reconfigure stream&logfile level and reset info,debug level.
"""
@@ -305,18 +305,18 @@ class DTSLOG(BaseLoggerAdapter):
# then add handler
self.__log_handler(fh, ch)
- if crb == "dut":
- self.info_lvl = logging.SUITE_DUT_CMD
- self.debug_lvl = logging.SUITE_DUT_OUTPUT
- elif crb == "tester":
- self.info_lvl = logging.SUITE_TESTER_CMD
- self.debug_lvl = logging.SUITE_TESTER_OUTPUT
- elif crb == "pktgen":
+ if node == "sut":
+ self.info_lvl = logging.SUITE_SUT_CMD
+ self.debug_lvl = logging.SUITE_SUT_OUTPUT
+ elif node == "tg":
+ self.info_lvl = logging.SUITE_TG_CMD
+ self.debug_lvl = logging.SUITE_TG_OUTPUT
+ elif node == "perf_tg":
self.info_lvl = logging.DTS_PKTGEN_CMD
self.debug_lvl = logging.DTS_PKTGEN_OUTPUT
- elif crb == "virtdut":
- self.info_lvl = logging.DTS_VIRTDUT_CMD
- self.debug_lvl = logging.DTS_VIRTDUT_OUTPUT
+ elif node == "virtsut":
+ self.info_lvl = logging.DTS_VIRTSUT_CMD
+ self.debug_lvl = logging.DTS_VIRTSUT_OUTPUT
def logger_exit(self):
"""
@@ -328,19 +328,19 @@ class DTSLOG(BaseLoggerAdapter):
self.logger.removeHandler(self.ch)
-def getLogger(name, crb="suite"):
+def getLogger(name, node="suite"):
"""
- Get logger handler and if there's no handler for specified CRB will create one.
+ Get the logger handler; if there is no handler for the specified node, create one.
"""
global Loggers
# return saved logger
for logger in Loggers:
- if logger["name"] == name and logger["crb"] == crb:
+ if logger["name"] == name and logger["node"] == node:
return logger["logger"]
# return new logger
- logger = DTSLOG(logging.getLogger(name), crb)
- Loggers.append({"logger": logger, "name": name, "crb": crb})
+ logger = DTSLOG(logging.getLogger(name), node)
+ Loggers.append({"logger": logger, "name": name, "node": node})
return logger
@@ -428,7 +428,7 @@ class LogParser(object):
def parse_logfile(self):
loglist = []
- out_type = "DTS_DUT_OUTPUT"
+ out_type = "DTS_SUT_OUTPUT"
for line in self.log_handler:
tmp = {}
line = line.replace("\n", "")
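
For reference, a minimal caller-side sketch of the renamed logger API (illustrative only, not taken from this diff; the suite name is a placeholder):

    from framework.logger import getLogger

    # One DTSLOG instance is cached per (name, node) pair.
    sut_logger = getLogger("example_suite", node="sut")
    sut_logger.config_execution("sut")             # selects DTS_SUT_CMD/OUTPUT/RESULT levels
    sut_logger.info("command sent to the SUT")

    tg_logger = getLogger("example_suite", node="tg")
    tg_logger.config_suite("example_suite", "tg")  # selects SUITE_TG_CMD/OUTPUT levels
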
diff --git a/framework/multiple_vm.py b/framework/multiple_vm.py
index aac8f160..6f7bcb7c 100644
--- a/framework/multiple_vm.py
+++ b/framework/multiple_vm.py
@@ -12,23 +12,23 @@ from .utils import RED
class MultipleVM(object):
"""
- Module for handle VM related actions in parallel on multiple DUTs
+ Module for handling VM-related actions in parallel on multiple SUTs
Supported actions: [start|command|migration]
Param max_vm: maximum number of threads
- Param duts: list of DUT objects
+ Param sut_nodes: list of SUT node objects
"""
- def __init__(self, max_vm, duts):
+ def __init__(self, max_vm, sut_nodes):
self.max_vm = max_vm
- self.duts = duts
+ self.sut_nodes = sut_nodes
self.pool = threadpool.ThreadPool(max_vm)
- self.pool_result = [dict() for _ in duts]
+ self.pool_result = [dict() for _ in sut_nodes]
self._pool_requests = list()
self._pool_executors = dict()
self.logger = getLogger("multiple_vm")
self.logger.info(
- "Created MultipleVM instance with %d DUTs and %d VMs" % (len(duts), max_vm)
+ "Created MultipleVM instance with %d SUTs and %d VMs" % (len(sut_nodes), max_vm)
)
def parallel_vm_start(self, args):
@@ -37,7 +37,7 @@ class MultipleVM(object):
Args format:
{
'name': 'VM0',
- 'dut_id': 1,
+ 'sut_id': 1,
'autodetect_topo': False,
'virt_config': { 'suite_name': '',
'vm_name': '',
@@ -62,21 +62,21 @@ class MultipleVM(object):
return format:
{
'name': 'VM0',
- 'dut_id' : 1,
+ 'sut_id' : 1,
'vm_obj': vm_obj
}
"""
result = {}
vm_name = args["name"]
- dut_id = args["dut_id"]
+ sut_id = args["sut_id"]
if "autodetect_topo" in args:
autodetect_topo = args["autodetect_topo"]
else:
autodetect_topo = True
- self.logger.info("Parallel task start for DUT%d %s" % (dut_id, vm_name))
+ self.logger.info("Parallel task start for SUT%d %s" % (sut_id, vm_name))
threading.current_thread().name = vm_name
from .qemu_kvm import QEMUKvm
@@ -85,19 +85,19 @@ class MultipleVM(object):
if "virt_config" in args:
suite_name = args["virt_config"]["suite_name"]
vm_name = args["virt_config"]["vm_name"]
- vm_obj = QEMUKvm(self.duts[dut_id], vm_name, suite_name)
+ vm_obj = QEMUKvm(self.sut_nodes[sut_id], vm_name, suite_name)
if "virt_params" in args:
virt_params = args["virt_params"]
else:
virt_params = dict()
else:
# VM configured by parameters
- vm_obj = QEMUKvm(self.duts[dut_id], vm_name, "multi_vm")
+ vm_obj = QEMUKvm(self.sut_nodes[sut_id], vm_name, "multi_vm")
virt_params = args["virt_params"]
# just save config, should be list
vm_obj.set_local_config([virt_params])
- vm_dut = None
+ vm_sut = None
if vm_obj.check_alive():
self.logger.debug("Check VM[%s] is alive" % vm_name)
@@ -106,24 +106,24 @@ class MultipleVM(object):
if "migration" in virt_params:
self.logger.debug("Immigrated VM[%s] is ready" % vm_name)
else:
- vm_dut = vm_obj.instantiate_vm_dut(autodetect_topo=autodetect_topo)
- self.logger.debug("VM[%s] instantiate vm dut is done" % vm_name)
+ vm_sut = vm_obj.instantiate_vm_sut(autodetect_topo=autodetect_topo)
+ self.logger.debug("VM[%s] instantiate vm SUT is done" % vm_name)
else:
vm_obj.quick_start()
- self.duts[dut_id].logger.debug("VM[%s] quick start is done" % vm_name)
+ self.sut_nodes[sut_id].logger.debug("VM[%s] quick start is done" % vm_name)
if "migration" in virt_params:
self.logger.debug("Immigrated VM[%s] is ready" % vm_name)
else:
vm_obj._check_vm_status()
self.logger.debug("VM[%s] check status is done" % vm_name)
- vm_dut = vm_obj.instantiate_vm_dut(autodetect_topo=autodetect_topo)
- self.logger.debug("VM[%s] instantiate vm dut is done" % vm_name)
+ vm_sut = vm_obj.instantiate_vm_sut(autodetect_topo=autodetect_topo)
+ self.logger.debug("VM[%s] instantiate vm SUT is done" % vm_name)
result["name"] = vm_name
- result["dut_id"] = dut_id
+ result["sut_id"] = sut_id
result["vm_obj"] = vm_obj
- result["vm_dut"] = vm_dut
- self.logger.info("Parallel task DUT%d %s Done and returned" % (dut_id, vm_name))
+ result["vm_sut"] = vm_sut
+ self.logger.info("Parallel task SUT%d %s Done and returned" % (sut_id, vm_name))
return result
def parallel_vm_stop(self, args):
@@ -135,8 +135,8 @@ class MultipleVM(object):
Args format:
{
'name': 'vm1',
- 'vm_dut': self.vm_dut,
- 'dut_id': 0,
+ 'vm_sut': self.vm_sut,
+ 'sut_id': 0,
'commands': ['cd dpdk', 'make install T=x86_64-native-linuxapp-gcc'],
'expects': ['#', "#"],
'timeouts': [5, 120],
@@ -144,8 +144,8 @@ class MultipleVM(object):
"""
result = {}
vm_name = args["name"]
- vm_dut = args["vm_dut"]
- dut_id = args["dut_id"]
+ vm_sut = args["vm_sut"]
+ sut_id = args["sut_id"]
commands = args["commands"]
expects = args["expects"]
timeouts = args["timeouts"]
@@ -154,7 +154,7 @@ class MultipleVM(object):
if "delay" in args:
time.sleep(args["delay"])
- self.logger.debug("Parallel task start for DUT%d %s" % (dut_id, vm_name))
+ self.logger.debug("Parallel task start for SUT%d %s" % (sut_id, vm_name))
combinations = list(zip(commands, expects, timeouts))
for combine in combinations:
@@ -163,16 +163,16 @@ class MultipleVM(object):
add_time = int(self.max_vm * 0.5)
timeout += add_time
if len(expect) == 0:
- output = vm_dut.send_command(command, timeout)
+ output = vm_sut.send_command(command, timeout)
else:
- output = vm_dut.send_expect(command, expect, timeout)
+ output = vm_sut.send_expect(command, expect, timeout)
outputs.append(output)
result["name"] = vm_name
- result["dut_id"] = dut_id
+ result["sut_id"] = sut_id
result["outputs"] = outputs
self.logger.debug(
- "Parallel task for DUT%d %s has been done and returned" % (dut_id, vm_name)
+ "Parallel task for SUT%d %s has been done and returned" % (sut_id, vm_name)
)
return result
@@ -191,7 +191,7 @@ class MultipleVM(object):
result = {}
vm_name = args["name"]
vm_obj = args["vm_obj"]
- dut_id = args["dut_id"]
+ sut_id = args["sut_id"]
remote_ip = args["remote_ip"]
migrate_port = args["migrate_port"]
@@ -199,7 +199,7 @@ class MultipleVM(object):
vm_obj.wait_migration_done()
result["name"] = vm_name
- result["dut_id"] = dut_id
+ result["sut_id"] = sut_id
return result
@@ -207,8 +207,8 @@ class MultipleVM(object):
"""
Save result in local variable, will be used later
"""
- self.pool_result[result["dut_id"]][result["name"]] = result
- self.pool_result[result["dut_id"]][result["name"]]["status"] = 0
+ self.pool_result[result["sut_id"]][result["name"]] = result
+ self.pool_result[result["sut_id"]][result["name"]]["status"] = 0
def handle_vm_exception(self, request, exc_info):
"""
@@ -224,14 +224,14 @@ class MultipleVM(object):
# print traceback info for exception
name = request.args[0]["name"]
self.logger.error(
- ("**** Exception occurred DUT%d:%s" % (request.args[0]["dut_id"], name))
+ ("**** Exception occurred SUT%d:%s" % (request.args[0]["sut_id"], name))
)
exc_type, exc_value, exc_traceback = exc_info
self.logger.error(repr(traceback.format_tb(exc_traceback)))
- result = {"name": name, "dut_id": request.args[0]["dut_id"]}
- self.pool_result[result["dut_id"]][result["name"]] = result
- self.pool_result[result["dut_id"]][result["name"]]["status"] = DTS_ERR_TBL[
+ result = {"name": name, "sut_id": request.args[0]["sut_id"]}
+ self.pool_result[result["sut_id"]][result["name"]] = result
+ self.pool_result[result["sut_id"]][result["name"]]["status"] = DTS_ERR_TBL[
"PARALLEL_EXECUTE_ERR"
]
@@ -267,7 +267,7 @@ class MultipleVM(object):
# set parallel mode
save_global_setting(DTS_PARALLEL_SETTING, "yes")
- self.pool_result = [dict() for _ in self.duts]
+ self.pool_result = [dict() for _ in self.sut_nodes]
for req in self._pool_requests:
self.pool.putRequest(req)
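
As an illustration of the renamed parameters (a sketch under an assumed suite context, not taken from this diff), the per-VM arguments and result are now keyed by sut_id/vm_sut:

    from framework.multiple_vm import MultipleVM

    def start_first_vm(sut_nodes):
        """Hypothetical helper: boot VM0 on the first SUT node."""
        vm_pool = MultipleVM(max_vm=4, sut_nodes=sut_nodes)
        start_args = {
            "name": "VM0",
            "sut_id": 0,               # index into sut_nodes (was dut_id)
            "autodetect_topo": False,
            "virt_params": {},         # or a "virt_config" block as documented above
        }
        result = vm_pool.parallel_vm_start(start_args)
        return result["vm_obj"], result["vm_sut"]   # vm_sut was vm_dut
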
diff --git a/framework/crb.py b/framework/node.py
similarity index 91%
rename from framework/crb.py
rename to framework/node.py
index 5ce4e2c9..c54a0a33 100644
--- a/framework/crb.py
+++ b/framework/node.py
@@ -6,31 +6,31 @@ import os
import re
import time
-from .config import PORTCONF, PktgenConf, PortConf
+from .config import PORTCONF, PortConf, TrafficGeneratorConf
from .logger import getLogger
-from .settings import TIMEOUT
+from .settings import PERF_TG_CONF_KEY, TIMEOUT
from .ssh_connection import SSHConnection
"""
-CRB (customer reference board) basic functions and handlers
+A node is a generic host that DTS connects to and manages.
"""
-class Crb(object):
+class Node(object):
"""
- Basic module for customer reference board. This module implement functions
- interact with CRB. With these function, we can get the information of
- CPU/PCI/NIC on the board and setup running environment for DPDK.
+ Basic module for node management. This module implements methods that
+ manage a node, such as information gathering (of CPU/PCI/NIC) and
+ environment setup.
"""
PCI_DEV_CACHE_KEY = None
NUMBER_CORES_CACHE_KEY = None
CORE_LIST_CACHE_KEY = None
- def __init__(self, crb, serializer, dut_id=0, name=None, alt_session=True):
- self.dut_id = dut_id
- self.crb = crb
+ def __init__(self, node, serializer, sut_id=0, name=None, alt_session=True):
+ self.sut_id = sut_id
+ self.node = node
self.read_cache = False
self.skip_setup = False
self.serializer = serializer
@@ -48,7 +48,7 @@ class Crb(object):
name,
self.get_username(),
self.get_password(),
- dut_id,
+ sut_id,
)
self.session.init_log(self.logger)
if alt_session:
@@ -57,7 +57,7 @@ class Crb(object):
name + "_alt",
self.get_username(),
self.get_password(),
- dut_id,
+ sut_id,
)
self.alt_session.init_log(self.logger)
else:
@@ -65,19 +65,19 @@ class Crb(object):
def get_ip_address(self):
"""
- Get CRB's ip address.
+ Get Node's ip address.
"""
raise NotImplementedError
def get_password(self):
"""
- Get CRB's login password.
+ Get Node's login password.
"""
raise NotImplementedError
def get_username(self):
"""
- Get CRB's login username.
+ Get Node's login username.
"""
raise NotImplementedError
@@ -91,7 +91,7 @@ class Crb(object):
trim_whitespace=True,
):
"""
- Send commands to crb and return string before expected string. If
+ Send commands to the node and return the output before the expected string. If
there's no expected string found before timeout, TimeoutException will
be raised.
@@ -102,7 +102,7 @@ class Crb(object):
if trim_whitespace:
expected = expected.strip()
- # sometimes there will be no alt_session like VM dut
+ # sometimes there is no alt_session, e.g. on a VM SUT
if alt_session and self.alt_session:
return self.alt_session.session.send_expect(cmds, expected, timeout, verify)
@@ -118,7 +118,7 @@ class Crb(object):
name,
self.get_username(),
self.get_password(),
- dut_id=self.dut_id,
+ sut_id=self.sut_id,
)
session.init_log(logger)
self.sessions.append(session)
@@ -169,7 +169,7 @@ class Crb(object):
def send_command(self, cmds, timeout=TIMEOUT, alt_session=False):
"""
- Send commands to crb and return string before timeout.
+ Send commands to node and return string before timeout.
"""
if alt_session and self.alt_session:
@@ -192,7 +192,7 @@ class Crb(object):
def get_total_huge_pages(self):
"""
- Get the huge page number of CRB.
+ Get the huge page number of Node.
"""
huge_pages = self.send_expect(
"awk '/HugePages_Total/ { print $2 }' /proc/meminfo", "# ", alt_session=True
@@ -203,7 +203,7 @@ class Crb(object):
def mount_huge_pages(self):
"""
- Mount hugepage file system on CRB.
+ Mount hugepage file system on Node.
"""
self.send_expect("umount `awk '/hugetlbfs/ { print $2 }' /proc/mounts`", "# ")
out = self.send_expect("awk '/hugetlbfs/ { print $2 }' /proc/mounts", "# ")
@@ -246,7 +246,7 @@ class Crb(object):
)
self.default_hugepages_cleared = True
- # some platform not support numa, example vm dut
+ # some platforms do not support NUMA, e.g. a VM SUT
try:
self.send_expect(
"echo %d > /sys/devices/system/node/%s/hugepages/hugepages-%skB/nr_hugepages"
@@ -300,7 +300,7 @@ class Crb(object):
def pci_devices_information(self):
"""
- Scan CRB pci device information and save it into cache file.
+ Scan Node pci device information and save it into cache file.
"""
if self.read_cache:
self.pci_devices_info = self.serializer.load(self.PCI_DEV_CACHE_KEY)
@@ -311,7 +311,7 @@ class Crb(object):
def pci_devices_information_uncached(self):
"""
- Scan CRB NIC's information on different OS.
+ Scan Node NIC's information on different OS.
"""
pci_devices_information_uncached = getattr(
self, "pci_devices_information_uncached_%s" % self.get_os_type()
@@ -329,29 +329,29 @@ class Crb(object):
self.pci_devices_info = []
obj_str = str(self)
- if "VirtDut" in obj_str:
+ if "VirtSut" in obj_str:
# there is no port.cfg in VM, so need to scan all pci in VM.
pass
else:
# only scan configured pcis
portconf = PortConf(PORTCONF)
- portconf.load_ports_config(self.crb["IP"])
+ portconf.load_ports_config(self.node["IP"])
configed_pcis = portconf.get_ports_config()
if configed_pcis:
- if "tester" in str(self):
- tester_pci_in_cfg = []
+ if "Traffic" in str(self):
+ tg_pci_in_cfg = []
for item in list(configed_pcis.values()):
for pci_info in match:
if item["peer"] == pci_info[0]:
- tester_pci_in_cfg.append(pci_info)
- match = tester_pci_in_cfg[:]
+ tg_pci_in_cfg.append(pci_info)
+ match = tg_pci_in_cfg[:]
else:
- dut_pci_in_cfg = []
+ sut_pci_in_cfg = []
for key in list(configed_pcis.keys()):
for pci_info in match:
if key == pci_info[0]:
- dut_pci_in_cfg.append(pci_info)
- match = dut_pci_in_cfg[:]
+ sut_pci_in_cfg.append(pci_info)
+ match = sut_pci_in_cfg[:]
# keep the original pci sequence
match = sorted(match)
else:
@@ -521,7 +521,7 @@ class Crb(object):
def create_file(self, contents, fileName):
"""
- Create file with contents and copy it to CRB.
+ Create file with contents and copy it to Node.
"""
with open(fileName, "w") as f:
f.write(contents)
@@ -529,17 +529,17 @@ class Crb(object):
def check_trex_process_existed(self):
"""
- if the tester and dut on same server
- and pktgen is trex, do not kill the process
+ if the TG and SUT are on the same server
+ and the traffic generator is TRex, do not kill the process
"""
if (
- "pktgen" in self.crb
- and (self.crb["pktgen"] is not None)
- and (self.crb["pktgen"].lower() == "trex")
+ PERF_TG_CONF_KEY in self.node
+ and (self.node[PERF_TG_CONF_KEY] is not None)
+ and (self.node[PERF_TG_CONF_KEY].lower() == "trex")
):
- if self.crb["IP"] == self.crb["tester IP"] and self.trex_prefix is None:
- conf_inst = PktgenConf("trex")
- conf_info = conf_inst.load_pktgen_config()
+ if self.node["IP"] == self.node["tg IP"] and self.trex_prefix is None:
+ conf_inst = TrafficGeneratorConf("trex")
+ conf_info = conf_inst.load_tg_config()
if "config_file" in conf_info:
config_file = conf_info["config_file"]
else:
@@ -554,7 +554,7 @@ class Crb(object):
def get_dpdk_pids(self, prefix_list, alt_session):
"""
- get all dpdk applications on CRB.
+ get all dpdk applications on Node.
"""
trex_prefix = self.check_trex_process_existed()
if trex_prefix is not None and trex_prefix in prefix_list:
@@ -614,19 +614,19 @@ class Crb(object):
def kill_all(self, alt_session=True):
"""
- Kill all dpdk applications on CRB.
+ Kill all dpdk applications on Node.
"""
- if "tester" in str(self):
- self.logger.info("kill_all: called by tester")
+ if "Traffic" in str(self):
+ self.logger.info("kill_all: called by tg")
pass
else:
if self.prefix_list:
- self.logger.info("kill_all: called by dut and prefix list has value.")
+ self.logger.info("kill_all: called by SUT and prefix list has value.")
self.get_dpdk_pids(self.prefix_list, alt_session)
# init prefix_list
self.prefix_list = []
else:
- self.logger.info("kill_all: called by dut and has no prefix list.")
+ self.logger.info("kill_all: called by SUT and has no prefix list.")
out = self.send_command(
"ls -l /var/run/dpdk |awk '/^d/ {print $NF}'",
timeout=0.5,
@@ -639,7 +639,7 @@ class Crb(object):
def close(self):
"""
- Close ssh session of CRB.
+ Close ssh session of Node.
"""
self.session.close()
self.alt_session.close()
@@ -648,10 +648,10 @@ class Crb(object):
"""
Get OS type from execution configuration file.
"""
- from .dut import Dut
+ from .sut_node import SutNode
- if isinstance(self, Dut) and "OS" in self.crb:
- return str(self.crb["OS"]).lower()
+ if isinstance(self, SutNode) and "OS" in self.node:
+ return str(self.node["OS"]).lower()
return "linux"
@@ -659,17 +659,17 @@ class Crb(object):
"""
Check real OS type whether match configured type.
"""
- from .dut import Dut
+ from .sut_node import SutNode
expected = "Linux.*#"
- if isinstance(self, Dut) and self.get_os_type() == "freebsd":
+ if isinstance(self, SutNode) and self.get_os_type() == "freebsd":
expected = "FreeBSD.*#"
self.send_expect("uname", expected, 2, alt_session=True)
def init_core_list(self):
"""
- Load or create core information of CRB.
+ Load or create core information of Node.
"""
if self.read_cache:
self.number_of_cores = self.serializer.load(self.NUMBER_CORES_CACHE_KEY)
@@ -682,7 +682,7 @@ class Crb(object):
def init_core_list_uncached(self):
"""
- Scan cores on CRB and create core information list.
+ Scan cores on Node and create core information list.
"""
init_core_list_uncached = getattr(
self, "init_core_list_uncached_%s" % self.get_os_type()
@@ -712,7 +712,7 @@ class Crb(object):
for core in core_elements:
threads = [int(x) for x in core.text.split(",")]
for thread in threads:
- if self.crb["bypass core0"] and socket_id == 0 and core_id == 0:
+ if self.node["bypass core0"] and socket_id == 0 and core_id == 0:
continue
self.cores.append(
{"socket": socket_id, "core": core_id, "thread": thread}
@@ -744,12 +744,12 @@ class Crb(object):
coremap[core] = core_id
core_id += 1
- if self.crb["bypass core0"] and core == "0" and socket == "0":
+ if self.node["bypass core0"] and core == "0" and socket == "0":
self.logger.info("Core0 bypassed")
continue
if (
- self.crb.get("dut arch") == "arm64"
- or self.crb.get("dut arch") == "ppc64"
+ self.node.get("sut arch") == "arm64"
+ or self.node.get("sut arch") == "ppc64"
):
self.cores.append(
{"thread": thread, "socket": node, "core": coremap[core]}
diff --git a/framework/pktgen.py b/framework/pktgen.py
deleted file mode 100644
index 5e53a4f2..00000000
--- a/framework/pktgen.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2010-2021 Intel Corporation
-#
-
-import os
-from copy import deepcopy
-
-from scapy.all import conf
-from scapy.fields import ConditionalField
-from scapy.packet import NoPayload
-from scapy.packet import Packet as scapyPacket
-from scapy.utils import rdpcap
-
-from .pktgen_base import (
- PKTGEN_DPDK,
- PKTGEN_IXIA,
- PKTGEN_IXIA_NETWORK,
- PKTGEN_TREX,
- STAT_TYPE,
- TRANSMIT_CONT,
- TRANSMIT_M_BURST,
- TRANSMIT_S_BURST,
- DpdkPacketGenerator,
-)
-from .pktgen_ixia import IxiaPacketGenerator
-from .pktgen_ixia_network import IxNetworkPacketGenerator
-from .pktgen_trex import TrexPacketGenerator
-
-# dts libs
-from .utils import convert_int2ip, convert_ip2int, convert_mac2long, convert_mac2str
-
-
-class PacketGeneratorHelper(object):
- """default packet generator stream option for all streams"""
-
- default_opt = {
- "stream_config": {
- "txmode": {},
- "transmit_mode": TRANSMIT_CONT,
- # for temporary usage because current pktgen design don't support
- # port level configuration, here using stream configuration to pass
- # rate percent
- "rate": 100,
- }
- }
-
- def __init__(self):
- self.packetLayers = dict()
-
- def _parse_packet_layer(self, pkt_object):
- """parse one packet every layers' fields and value"""
- if pkt_object == None:
- return
-
- self.packetLayers[pkt_object.name] = dict()
- for curfield in pkt_object.fields_desc:
- if isinstance(curfield, ConditionalField) and not curfield._evalcond(
- pkt_object
- ):
- continue
- field_value = pkt_object.getfieldval(curfield.name)
- if isinstance(field_value, scapyPacket) or (
- curfield.islist and curfield.holds_packets and type(field_value) is list
- ):
- continue
- repr_value = curfield.i2repr(pkt_object, field_value)
- if isinstance(repr_value, str):
- repr_value = repr_value.replace(
- os.linesep, os.linesep + " " * (len(curfield.name) + 4)
- )
- self.packetLayers[pkt_object.name][curfield.name] = repr_value
-
- if isinstance(pkt_object.payload, NoPayload):
- return
- else:
- self._parse_packet_layer(pkt_object.payload)
-
- def _parse_pcap(self, pcapFile, number=0):
- """parse one packet content"""
- pcap_pkts = []
- if os.path.exists(pcapFile) == False:
- warning = "{0} is not exist !".format(pcapFile)
- raise Exception(warning)
-
- pcap_pkts = rdpcap(pcapFile)
- # parse packets' every layers and fields
- if len(pcap_pkts) == 0:
- warning = "{0} is empty".format(pcapFile)
- raise Exception(warning)
- elif number >= len(pcap_pkts):
- warning = "{0} is missing No.{1} packet".format(pcapFile, number)
- raise Exception(warning)
- else:
- self._parse_packet_layer(pcap_pkts[number])
-
- def _set_pktgen_fields_config(self, pcap, suite_config):
- """
- get default fields value from a pcap file and unify layer fields
- variables for trex/ixia
- """
- self._parse_pcap(pcap)
- if not self.packetLayers:
- msg = "pcap content is empty"
- raise Exception(msg)
- # suite fields config convert to pktgen fields config
- fields_config = {}
- # set ethernet protocol layer fields
- layer_name = "mac"
- if layer_name in list(suite_config.keys()) and "Ethernet" in self.packetLayers:
- fields_config[layer_name] = {}
- suite_fields = suite_config.get(layer_name)
- pcap_fields = self.packetLayers.get("Ethernet")
- for name, config in suite_fields.items():
- action = config.get("action") or "default"
- range = config.get("range") or 64
- step = config.get("step") or 1
- start_mac = pcap_fields.get(name)
- end_mac = convert_mac2str(convert_mac2long(start_mac) + range - 1)
- fields_config[layer_name][name] = {}
- fields_config[layer_name][name]["start"] = start_mac
- fields_config[layer_name][name]["end"] = end_mac
- fields_config[layer_name][name]["step"] = step
- fields_config[layer_name][name]["action"] = action
- # set ip protocol layer fields
- layer_name = "ip"
- if layer_name in list(suite_config.keys()) and "IP" in self.packetLayers:
- fields_config[layer_name] = {}
- suite_fields = suite_config.get(layer_name)
- pcap_fields = self.packetLayers.get("IP")
- for name, config in suite_fields.items():
- action = config.get("action") or "default"
- range = config.get("range") or 64
- step = config.get("step") or 1
- start_ip = pcap_fields.get(name)
- end_ip = convert_int2ip(convert_ip2int(start_ip) + range - 1)
- fields_config[layer_name][name] = {}
- fields_config[layer_name][name]["start"] = start_ip
- fields_config[layer_name][name]["end"] = end_ip
- fields_config[layer_name][name]["step"] = step
- fields_config[layer_name][name]["action"] = action
- # set vlan protocol layer fields, only support one layer vlan here
- layer_name = "vlan"
- if layer_name in list(suite_config.keys()) and "802.1Q" in self.packetLayers:
- fields_config[layer_name] = {}
- suite_fields = suite_config.get(layer_name)
- pcap_fields = self.packetLayers.get("802.1Q")
- # only support one layer vlan here, so set name to `0`
- name = 0
- if name in list(suite_fields.keys()):
- config = suite_fields[name]
- action = config.get("action") or "default"
- range = config.get("range") or 64
- # ignore 'L' suffix
- if "L" in pcap_fields.get(layer_name):
- start_vlan = int(pcap_fields.get(layer_name)[:-1])
- else:
- start_vlan = int(pcap_fields.get(layer_name))
- end_vlan = start_vlan + range - 1
- fields_config[layer_name][name] = {}
- fields_config[layer_name][name]["start"] = start_vlan
- fields_config[layer_name][name]["end"] = end_vlan
- fields_config[layer_name][name]["step"] = 1
- fields_config[layer_name][name]["action"] = action
-
- return fields_config
-
- def prepare_stream_from_tginput(
- self, tgen_input, ratePercent, vm_config, pktgen_inst
- ):
- """create streams for ports, one port one stream"""
- # set stream in pktgen
- stream_ids = []
- for config in tgen_input:
- stream_id = pktgen_inst.add_stream(*config)
- pcap = config[2]
- _options = deepcopy(self.default_opt)
- _options["pcap"] = pcap
- _options["stream_config"]["rate"] = ratePercent
- # if vm is set
- if vm_config:
- _options["fields_config"] = self._set_pktgen_fields_config(
- pcap, vm_config
- )
- pktgen_inst.config_stream(stream_id, _options)
- stream_ids.append(stream_id)
- return stream_ids
-
-
-def getPacketGenerator(tester, pktgen_type=PKTGEN_IXIA):
- """
- Get packet generator object
- """
- pktgen_type = pktgen_type.lower()
-
- pktgen_cls = {
- PKTGEN_DPDK: DpdkPacketGenerator,
- PKTGEN_IXIA: IxiaPacketGenerator,
- PKTGEN_IXIA_NETWORK: IxNetworkPacketGenerator,
- PKTGEN_TREX: TrexPacketGenerator,
- }
-
- if pktgen_type in list(pktgen_cls.keys()):
- CLS = pktgen_cls.get(pktgen_type)
- return CLS(tester)
- else:
- msg = "not support <{0}> packet generator".format(pktgen_type)
- raise Exception(msg)
diff --git a/framework/plotting.py b/framework/plotting.py
index 9a42a00c..89c6da89 100644
--- a/framework/plotting.py
+++ b/framework/plotting.py
@@ -51,13 +51,13 @@ class Plotting(object):
default_line_styles = ["--"]
- def __init__(self, crb, target, nic):
+ def __init__(self, node, target, nic):
# Ensure the folder exist
try:
path = "/".join(
- [Plotting.path_2_result, crb, target, nic, Plotting.plots_subfolder]
+ [Plotting.path_2_result, node, target, nic, Plotting.plots_subfolder]
)
if not os.path.exists(path):
@@ -68,7 +68,7 @@ class Plotting(object):
except Exception as e:
raise VerifyFailure("Plot Error: " + str(e))
- def clear_all_plots(self, crb, target):
+ def clear_all_plots(self, node, target):
shutil.rmtree(self.plots_path, True)
def create_bars_plot(
diff --git a/framework/pmd_output.py b/framework/pmd_output.py
index c8e8b50f..b13e0200 100644
--- a/framework/pmd_output.py
+++ b/framework/pmd_output.py
@@ -16,12 +16,12 @@ class PmdOutput:
Module for get all statics value by port in testpmd
"""
- def __init__(self, dut, session=None):
- self.dut = dut
+ def __init__(self, sut_node, session=None):
+ self.sut_node = sut_node
if session is None:
- session = dut
+ session = sut_node
self.session = session
- self.dut.testpmd = self
+ self.sut_node.testpmd = self
self.rx_pkts_prefix = "RX-packets:"
self.rx_missed_prefix = "RX-missed:"
self.rx_bytes_prefix = "RX-bytes:"
@@ -48,9 +48,9 @@ class PmdOutput:
"""
set default cores for start testpmd
"""
- core_number = len(self.dut.cores)
+ core_number = len(self.sut_node.cores)
if core_number < 2:
- raise ValueError(f"Not enough cores on DUT {self.dut}")
+ raise ValueError(f"Not enough cores on SUT {self.sut_node}")
else:
self.default_cores = "1S/2C/1T"
@@ -133,17 +133,17 @@ class PmdOutput:
)
):
config["ports"] = [
- self.dut.ports_info[i]["pci"] for i in range(len(self.dut.ports_info))
+ self.sut_node.ports_info[i]["pci"] for i in range(len(self.sut_node.ports_info))
]
- all_eal_param = self.dut.create_eal_parameters(
+ all_eal_param = self.sut_node.create_eal_parameters(
fixed_prefix=fixed_prefix, socket=socket, **config
)
- app_name = self.dut.apps_name["test-pmd"]
+ app_name = self.sut_node.apps_name["test-pmd"]
command = app_name + " %s -- -i %s" % (all_eal_param, param)
command = command.replace(" ", " ")
- if self.session != self.dut:
- self.session.send_expect("cd %s" % self.dut.base_dir, "# ")
+ if self.session != self.sut_node:
+ self.session.send_expect("cd %s" % self.sut_node.base_dir, "# ")
out = self.session.send_expect(command, expected, timeout)
self.command = command
# wait 10s to ensure links getting up before test start.
@@ -153,7 +153,7 @@ class PmdOutput:
def execute_cmd(
self, pmd_cmd, expected="testpmd> ", timeout=TIMEOUT, alt_session=False
):
- if "dut" in str(self.session):
+ if "Sut" in str(self.session):
return self.session.send_expect(
"%s" % pmd_cmd, expected, timeout=timeout, alt_session=alt_session
)
@@ -161,7 +161,7 @@ class PmdOutput:
return self.session.send_expect("%s" % pmd_cmd, expected, timeout=timeout)
def get_output(self, timeout=1):
- if "dut" in str(self.session):
+ if "Sut" in str(self.session):
return self.session.get_session_output(timeout=timeout)
else:
return self.session.get_session_before(timeout=timeout)
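
For orientation, a hedged sketch of suite-side usage after the rename (start_testpmd is assumed from the existing PmdOutput API; only execute_cmd()/get_output() appear in this hunk):

    from framework.pmd_output import PmdOutput

    def run_testpmd_smoke(sut_node):
        """Hypothetical helper: sut_node is the suite's SUT node object (was dut)."""
        pmd_out = PmdOutput(sut_node)
        pmd_out.start_testpmd("default", param="--rxq=2 --txq=2")  # assumed existing helper
        out = pmd_out.execute_cmd("show port stats all")
        pmd_out.execute_cmd("quit", expected="# ")
        return out
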
diff --git a/framework/project_dpdk.py b/framework/project_dpdk.py
index c20aa044..b2b7be37 100644
--- a/framework/project_dpdk.py
+++ b/framework/project_dpdk.py
@@ -5,9 +5,8 @@
import os
import re
-from .crb import Crb
-from .dut import Dut
from .logger import getLogger
+from .node import Node
from .settings import (
CONFIG_ROOT_PATH,
DPDK_RXMODE_SETTING,
@@ -22,26 +21,27 @@ from .settings import (
save_global_setting,
)
from .ssh_connection import SSHConnection
-from .tester import Tester
+from .sut_node import SutNode
+from .tg_node import TrafficGeneratorNode
from .utils import RED
-class DPDKdut(Dut):
+class DPDKSut(SutNode):
"""
- DPDK project class for DUT. DTS will call set_target function to setup
+ DPDK project class for SUT. DTS will call set_target function to setup
build, memory and kernel module.
"""
- def __init__(self, crb, serializer, dut_id=0, name=None, alt_session=True):
- super(DPDKdut, self).__init__(crb, serializer, dut_id, name, alt_session)
+ def __init__(self, node, serializer, sut_id=0, name=None, alt_session=True):
+ super(DPDKSut, self).__init__(node, serializer, sut_id, name, alt_session)
self.testpmd = None
def set_target(self, target, bind_dev=True):
"""
Set env variable, these have to be setup all the time. Some tests
need to compile example apps by themselves and will fail otherwise.
- Set hugepage on DUT and install modules required by DPDK.
+ Set hugepage on SUT and install modules required by DPDK.
Configure default ixgbe PMD function.
"""
self.target = target
@@ -54,7 +54,7 @@ class DPDKdut(Dut):
self.set_rxtx_mode()
self.apps_name = self.apps_name_conf["meson"]
- # use the dut target directory instead of 'target' string in app name
+ # use the SUT target directory instead of 'target' string in app name
for app in self.apps_name:
cur_app_path = self.apps_name[app].replace("target", self.target)
self.apps_name[app] = cur_app_path + " "
@@ -80,7 +80,7 @@ class DPDKdut(Dut):
def setup_modules(self, target, drivername, drivermode):
"""
- Install DPDK required kernel module on DUT.
+ Install DPDK required kernel module on SUT.
"""
setup_modules = getattr(self, "setup_modules_%s" % self.get_os_type())
setup_modules(target, drivername, drivermode)
@@ -124,7 +124,7 @@ class DPDKdut(Dut):
def setup_modules_freebsd(self, target, drivername, drivermode):
"""
- Install DPDK required Freebsd kernel module on DUT.
+ Install DPDK required Freebsd kernel module on SUT.
"""
binding_list = ""
@@ -140,14 +140,14 @@ class DPDKdut(Dut):
def restore_modules(self):
"""
- Restore DPDK kernel module on DUT.
+ Restore DPDK kernel module on SUT.
"""
restore_modules = getattr(self, "restore_modules_%s" % self.get_os_type())
restore_modules()
def restore_modules_linux(self):
"""
- Restore DPDK Linux kernel module on DUT.
+ Restore DPDK Linux kernel module on SUT.
"""
drivername = load_global_setting(HOST_DRIVER_SETTING)
if drivername == "vfio-pci":
@@ -161,7 +161,7 @@ class DPDKdut(Dut):
def restore_modules_freebsd(self):
"""
- Restore DPDK Freebsd kernel module on DUT.
+ Restore DPDK Freebsd kernel module on SUT.
"""
pass
@@ -177,7 +177,7 @@ class DPDKdut(Dut):
out = self.send_expect("lscpu | grep avx512", "#")
if "avx512f" not in out or "no-avx512f" in out:
self.logger.warning(
- RED("*********The DUT CPU do not support AVX512 test!!!********")
+ RED("*********The SUT CPU do not support AVX512 test!!!********")
)
self.logger.warning(
RED("*********Now set the rx_mode to default!!!**********")
@@ -299,16 +299,16 @@ class DPDKdut(Dut):
def prepare_package(self):
if not self.skip_setup:
session_info = None
- # if snapshot_load_side=dut, will copy the dpdk tar from dut side
- # and will judge whether the path of tar is existed on dut
- if self.crb["snapshot_load_side"] == "dut":
+ # if snapshot_load_side=sut, copy the dpdk tar from the SUT side
+ # and check whether the tar path exists on the SUT
+ if self.node["snapshot_load_side"] == "sut":
if not os.path.isabs(self.package):
raise ValueError(
- "As snapshot_load_side=dut, will copy dpdk.tar "
- "from dut, please specify a abs path use params "
+ "As snapshot_load_side=sut, will copy dpdk.tar "
+ "from SUT, please specify a abs path use params "
"--snapshot when run dts"
)
- # if ':' in session, this is vm dut, use the dut session
+ # if ':' is in the session name, this is a VM SUT, so use the SUT session
if ":" in self.session.name:
session_info = self.host_session
else:
@@ -334,14 +334,14 @@ class DPDKdut(Dut):
"Directory %s or %s does not exist,"
"please check params -d" % (p_dir, dst_dir)
)
- self.session.copy_file_to(self.package, dst_dir, crb_session=session_info)
+ self.session.copy_file_to(self.package, dst_dir, node_session=session_info)
# put patches to p_dir/patches/
if self.patches is not None:
for p in self.patches:
self.session.copy_file_to("dep/" + p, dst_dir)
- # copy QMP file to dut
+ # copy QMP file to SUT
if ":" not in self.session.name:
out = self.send_expect("ls -d ~/QMP", "# ", verify=True)
if isinstance(out, int):
@@ -385,10 +385,10 @@ class DPDKdut(Dut):
def prerequisites(self):
"""
- Copy DPDK package to DUT and apply patch files.
+ Copy DPDK package to SUT and apply patch files.
"""
self.prepare_package()
- self.dut_prerequisites()
+ self.sut_prerequisites()
self.stage = "post-init"
def extra_nic_setup(self):
@@ -519,12 +519,12 @@ class DPDKdut(Dut):
Get block list command string on Linux.
"""
blocklist = ""
- dutPorts = self.get_ports(nic)
+ sutPorts = self.get_ports(nic)
self.restore_interfaces()
self.send_expect("insmod ./%s/kmod/igb_uio.ko" % target, "# ")
self.bind_interfaces_linux()
for port in range(0, len(self.ports_info)):
- if port not in dutPorts:
+ if port not in sutPorts:
blocklist += "-b %s " % self.ports_info[port]["pci"]
return blocklist
@@ -537,20 +537,20 @@ class DPDKdut(Dut):
return blocklist
-class DPDKtester(Tester):
+class DPDKTrafficGenerator(TrafficGeneratorNode):
"""
- DPDK project class for tester. DTS will call prerequisites function to setup
- interface and generate port map.
+ DPDK project class for the traffic generator. DTS will call the prerequisites
+ function to set up interfaces and generate the port map.
"""
- def __init__(self, crb, serializer, dut_id):
- self.NAME = "tester"
- super(DPDKtester, self).__init__(crb, serializer)
+ def __init__(self, node, serializer, sut_id):
+ self.NAME = "tg"
+ super(DPDKTrafficGenerator, self).__init__(node, serializer)
def prerequisites(self, perf_test=False):
"""
- Setup hugepage on tester and copy validation required files to tester.
+ Set up hugepages on the TG and copy required validation files to the TG.
"""
self.kill_all()
@@ -577,7 +577,7 @@ class DPDKtester(Tester):
self.send_expect("modprobe uio", "# ")
- self.tester_prerequisites()
+ self.tg_prerequisites()
self.set_promisc()
@@ -585,7 +585,7 @@ class DPDKtester(Tester):
def setup_memory(self, hugepages=-1):
"""
- Setup hugepage on tester.
+ Setup hugepage on TG.
"""
hugepages_size = self.send_expect(
"awk '/Hugepagesize/ {print $2}' /proc/meminfo", "# "
diff --git a/framework/qemu_kvm.py b/framework/qemu_kvm.py
index 20aa8008..2fb4ab15 100644
--- a/framework/qemu_kvm.py
+++ b/framework/qemu_kvm.py
@@ -51,7 +51,7 @@ def handle_control_session(func):
)
)
print(RED(e))
- self.close_control_session(dut_id=self.host_dut.dut_id)
+ self.close_control_session(sut_id=self.host_sut.sut_id)
return "Failed"
return _handle_control_session
@@ -89,8 +89,8 @@ class QEMUKvm(VirtBase):
# Default password prompt
PASSWORD_PROMPT = "Password:"
- def __init__(self, dut, vm_name, suite_name):
- super(QEMUKvm, self).__init__(dut, vm_name, suite_name)
+ def __init__(self, sut, vm_name, suite_name):
+ super(QEMUKvm, self).__init__(sut, vm_name, suite_name)
# initialize qemu emulator, example: qemu-system-x86_64
self.qemu_emulator = self.get_qemu_emulator()
@@ -112,8 +112,8 @@ class QEMUKvm(VirtBase):
self.pci_maps = []
# default login user,password
- self.username = dut.crb["user"]
- self.password = dut.crb["pass"]
+ self.username = sut.node["user"]
+ self.password = sut.node["pass"]
# internal variable to track whether default nic has been added
self.__default_nic = False
@@ -199,7 +199,7 @@ class QEMUKvm(VirtBase):
def get_qemu_emulator(self):
"""
- Get the qemu emulator based on the crb.
+ Get the qemu emulator based on the node.
"""
arch = self.host_session.send_expect("uname -m", "# ")
return "qemu-system-" + arch
@@ -211,8 +211,8 @@ class QEMUKvm(VirtBase):
out = self.host_session.send_expect("ls %s" % qemu_emulator_path, "# ")
if "No such file or directory" in out:
self.host_logger.error(
- "No emulator [ %s ] on the DUT [ %s ]"
- % (qemu_emulator_path, self.host_dut.get_ip_address())
+ "No emulator [ %s ] on the SUT [ %s ]"
+ % (qemu_emulator_path, self.host_sut.get_ip_address())
)
return None
out = self.host_session.send_expect(
@@ -220,8 +220,8 @@ class QEMUKvm(VirtBase):
)
if out != "0":
self.host_logger.error(
- "Emulator [ %s ] not executable on the DUT [ %s ]"
- % (qemu_emulator_path, self.host_dut.get_ip_address())
+ "Emulator [ %s ] not executable on the SUT [ %s ]"
+ % (qemu_emulator_path, self.host_sut.get_ip_address())
)
return None
self.qemu_emulator = qemu_emulator_path
@@ -395,7 +395,7 @@ class QEMUKvm(VirtBase):
mem_boot_huge = (
"-object memory-backend-file,"
+ "id=mem,size=%sM,mem-path=%s,share=on"
- % (options["size"], self.host_dut.hugepage_path)
+ % (options["size"], self.host_sut.hugepage_path)
)
self.__add_boot_line(mem_boot_huge)
@@ -595,7 +595,7 @@ class QEMUKvm(VirtBase):
# get the host addr
host_addr = field(opt_hostfwd, 1)
if not host_addr:
- addr = str(self.host_dut.get_ip_address())
+ addr = str(self.host_sut.get_ip_address())
host_addr = get_host_ip(addr)
# get the host port in the option
@@ -690,7 +690,7 @@ class QEMUKvm(VirtBase):
qemu_ifup = self.QEMU_IFUP % {"switch": switch}
file_name = os.path.basename(self.QEMU_IFUP_PATH)
tmp_file_path = "/tmp/%s" % file_name
- self.host_dut.create_file(qemu_ifup, tmp_file_path)
+ self.host_sut.create_file(qemu_ifup, tmp_file_path)
self.host_session.send_expect(
"mv -f ~/%s %s" % (file_name, self.QEMU_IFUP_PATH), "# "
)
@@ -1035,7 +1035,7 @@ class QEMUKvm(VirtBase):
if len(out) == 0:
raise StartVMFailedException(
"Can't get output from [%s:%s]"
- % (self.host_dut.crb["My IP"], self.vm_name)
+ % (self.host_sut.node["My IP"], self.vm_name)
)
m = re.match(shell_reg, out)
@@ -1075,10 +1075,10 @@ class QEMUKvm(VirtBase):
print(
RED(
"[%s:%s] exception [%s] happened"
- % (self.host_dut.crb["My IP"], self.vm_name, str(e))
+ % (self.host_sut.node["My IP"], self.vm_name, str(e))
)
)
- self.close_control_session(dut_id=self.host_dut.dut_id)
+ self.close_control_session(sut_id=self.host_sut.sut_id)
return False
def connect_telnet_port(self, name=""):
@@ -1137,7 +1137,7 @@ class QEMUKvm(VirtBase):
print(
RED(
"[%s:%s] not ready for login"
- % (self.host_dut.crb["My IP"], self.vm_name)
+ % (self.host_sut.node["My IP"], self.vm_name)
)
)
return False
@@ -1154,10 +1154,10 @@ class QEMUKvm(VirtBase):
print(
RED(
"[%s:%s] exception [%s] happened"
- % (self.host_dut.crb["My IP"], self.vm_name, str(e))
+ % (self.host_sut.node["My IP"], self.vm_name, str(e))
)
)
- self.close_control_session(dut_id=self.host_dut.dut_id)
+ self.close_control_session(sut_id=self.host_sut.sut_id)
return False
def connect_qga_port(self, name=""):
@@ -1181,10 +1181,10 @@ class QEMUKvm(VirtBase):
# when exception happened, force close qga process and reconnect
print(
RED(
- "[%s:%s] QGA not ready" % (self.host_dut.crb["My IP"], self.vm_name)
+ "[%s:%s] QGA not ready" % (self.host_sut.node["My IP"], self.vm_name)
)
)
- self.close_control_session(dut_id=self.host_dut.dut_id)
+ self.close_control_session(sut_id=self.host_sut.sut_id)
return False
def add_vm_vnc(self, **options):
@@ -1373,7 +1373,7 @@ class QEMUKvm(VirtBase):
qemu_boot_line = self.generate_qemu_boot_line()
- self.__send_qemu_cmd(qemu_boot_line, dut_id=self.host_dut.dut_id)
+ self.__send_qemu_cmd(qemu_boot_line, sut_id=self.host_sut.sut_id)
self.__get_pci_mapping()
@@ -1392,7 +1392,7 @@ class QEMUKvm(VirtBase):
# Start VM using the qemu command
# lock critical action like start qemu
@parallel_lock(num=4)
- def __send_qemu_cmd(self, qemu_boot_line, dut_id):
+ def __send_qemu_cmd(self, qemu_boot_line, sut_id):
# add more time for qemu start will be slow when system is busy
ret = self.host_session.send_expect(
qemu_boot_line, "# ", verify=True, timeout=30
@@ -1411,7 +1411,7 @@ class QEMUKvm(VirtBase):
qemu_boot_line = self.generate_qemu_boot_line()
- self.__send_qemu_cmd(qemu_boot_line, dut_id=self.host_dut.dut_id)
+ self.__send_qemu_cmd(qemu_boot_line, sut_id=self.host_sut.sut_id)
self.__get_pci_mapping()
@@ -1437,7 +1437,7 @@ class QEMUKvm(VirtBase):
self.host_logger.warning(
"Can't login [%s] on [%s], retry %d times!!!"
- % (self.vm_name, self.host_dut.crb["My IP"], try_times + 1)
+ % (self.vm_name, self.host_sut.node["My IP"], try_times + 1)
)
time.sleep(self.OPERATION_TIMEOUT)
try_times += 1
@@ -1450,7 +1450,7 @@ class QEMUKvm(VirtBase):
if not logged_in:
if not self.restarted:
# make sure serial session has been quit
- self.close_control_session(dut_id=self.host_dut.dut_id)
+ self.close_control_session(sut_id=self.host_sut.sut_id)
self.vm_status = ST_NOTSTART
self._stop_vm()
self.restarted = True
@@ -1567,7 +1567,7 @@ class QEMUKvm(VirtBase):
self.host_logger.warning(
"[%s] on [%s] network not ready, retry %d times!!!"
- % (self.vm_name, self.host_dut.crb["My IP"], try_times + 1)
+ % (self.vm_name, self.host_sut.node["My IP"], try_times + 1)
)
time.sleep(self.OPERATION_TIMEOUT)
try_times += 1
@@ -1639,7 +1639,7 @@ class QEMUKvm(VirtBase):
"""
Check if the specified PCI dev is a VF.
"""
- for port_info in self.host_dut.ports_info:
+ for port_info in self.host_sut.ports_info:
if "sriov_vfs_pci" in list(port_info.keys()):
if dev_pci in port_info["sriov_vfs_pci"]:
return True
@@ -1649,7 +1649,7 @@ class QEMUKvm(VirtBase):
"""
Map the specified VF to PF.
"""
- for port_info in self.host_dut.ports_info:
+ for port_info in self.host_sut.ports_info:
if "sriov_vfs_pci" in list(port_info.keys()):
if dev_pci in port_info["sriov_vfs_pci"]:
return port_info["pci"]
@@ -1659,7 +1659,7 @@ class QEMUKvm(VirtBase):
"""
Get the NetDevice instance of specified VF.
"""
- for port_info in self.host_dut.ports_info:
+ for port_info in self.host_sut.ports_info:
if "vfs_port" in list(port_info.keys()):
for port in port_info["vfs_port"]:
if dev_pci == port.pci:
@@ -1682,7 +1682,7 @@ class QEMUKvm(VirtBase):
"""
Get NetDevice instance by the specified PCI bus number.
"""
- port_info = self.host_dut.get_port_info(net_device_pci)
+ port_info = self.host_sut.get_port_info(net_device_pci)
return port_info["port"]
def get_vm_ip(self):
@@ -1743,7 +1743,7 @@ class QEMUKvm(VirtBase):
if not self.monitor_sock_path:
self.host_logger.info(
"No monitor between on host [ %s ] for guest [ %s ]"
- % (self.host_dut.NAME, self.vm_name)
+ % (self.host_sut.NAME, self.vm_name)
)
return None
@@ -1779,8 +1779,8 @@ class QEMUKvm(VirtBase):
info = self.host_session.send_expect("cat %s" % self.__pid_file, "# ")
try:
pid = int(info.split()[0])
- # save pid into dut structure
- self.host_dut.virt_pids.append(pid)
+ # save pid into SUT structure
+ self.host_sut.virt_pids.append(pid)
except:
self.host_logger.info("Failed to capture pid!!!")
@@ -1789,7 +1789,7 @@ class QEMUKvm(VirtBase):
try:
# sometimes saw to lines in pid file
pid = int(info.splitlines()[0])
- # save pid into dut structure
+ # save pid into SUT structure
self.pid = pid
return True
except:
@@ -1863,9 +1863,9 @@ class QEMUKvm(VirtBase):
self.control_session = None
@parallel_lock()
- def close_control_session(self, dut_id):
+ def close_control_session(self, sut_id):
"""
- Force kill serial connection from DUT when exception happened
+ Force kill the serial connection from the SUT when an exception happens
"""
# return control_session to host_session
if self.control_type == "socket":
@@ -1873,22 +1873,22 @@ class QEMUKvm(VirtBase):
"ps -e -o pid,cmd |grep 'socat %s STDIO' |grep -v grep"
% self.serial_path
)
- out = self.host_dut.send_expect(scan_cmd, "#")
+ out = self.host_sut.send_expect(scan_cmd, "#")
proc_info = out.strip().split()
try:
pid = int(proc_info[0])
- self.host_dut.send_expect("kill %d" % pid, "#")
+ self.host_sut.send_expect("kill %d" % pid, "#")
except:
pass
- self.host_dut.send_expect("", "# ")
+ self.host_sut.send_expect("", "# ")
elif self.control_type == "telnet":
scan_cmd = (
"lsof -n -i:%d | grep telnet | awk '{print $2}'" % self.serial_port
)
- proc_info = self.host_dut.send_expect(scan_cmd, "#")
+ proc_info = self.host_sut.send_expect(scan_cmd, "#")
try:
pid = int(proc_info)
- self.host_dut.send_expect("kill %d" % pid, "#")
+ self.host_sut.send_expect("kill %d" % pid, "#")
except:
pass
elif self.control_type == "qga":
@@ -1896,11 +1896,11 @@ class QEMUKvm(VirtBase):
"ps -e -o pid,cmd |grep 'address=%s' |grep -v grep"
% self.qga_socket_path
)
- out = self.host_dut.send_expect(scan_cmd, "#")
+ out = self.host_sut.send_expect(scan_cmd, "#")
proc_info = out.strip().split()
try:
pid = int(proc_info[0])
- self.host_dut.send_expect("kill %d" % pid, "#")
+ self.host_sut.send_expect("kill %d" % pid, "#")
except:
pass
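
As a usage sketch mirroring the MultipleVM flow shown earlier (illustrative; the VM name, suite name and virt_params are placeholders, not taken from this diff):

    from framework.qemu_kvm import QEMUKvm

    def boot_vm_on_sut(sut_node, virt_params):
        """Hypothetical helper: start a VM on a SUT node and return its VM SUT object."""
        vm = QEMUKvm(sut_node, "vm0", "example_suite")
        vm.set_local_config([virt_params])
        vm.quick_start()
        vm._check_vm_status()                      # as MultipleVM does above
        return vm.instantiate_vm_sut(autodetect_topo=True)
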
diff --git a/framework/qemu_libvirt.py b/framework/qemu_libvirt.py
index cf406bad..df4e1270 100644
--- a/framework/qemu_libvirt.py
+++ b/framework/qemu_libvirt.py
@@ -12,10 +12,10 @@ from xml.etree.ElementTree import ElementTree
import framework.utils as utils
from .config import VIRTCONF, VirtConf
-from .dut import Dut
from .exception import StartVMFailedException
from .logger import getLogger
from .ssh_connection import SSHConnection
+from .sut_node import SutNode
from .virt_base import VirtBase
from .virt_resource import VirtResource
@@ -39,14 +39,14 @@ class LibvirtKvm(VirtBase):
)
QEMU_IFUP_PATH = "/etc/qemu-ifup"
- def __init__(self, dut, name, suite):
+ def __init__(self, sut, name, suite):
# initialize virtualization base module
- super(LibvirtKvm, self).__init__(dut, name, suite)
+ super(LibvirtKvm, self).__init__(sut, name, suite)
# initialize qemu emulator, example: qemu-system-x86_64
self.qemu_emulator = self.get_qemu_emulator()
- self.logger = dut.logger
+ self.logger = sut.logger
# disk and pci device default index
self.diskindex = "a"
self.controllerindex = 0
@@ -66,8 +66,8 @@ class LibvirtKvm(VirtBase):
self.pci_maps = []
# default login user,password
- self.username = self.host_dut.crb["user"]
- self.password = self.host_dut.crb["pass"]
+ self.username = self.host_sut.node["user"]
+ self.password = self.host_sut.node["pass"]
# internal variable to track whether default nic has been added
self.__default_nic = False
@@ -79,7 +79,7 @@ class LibvirtKvm(VirtBase):
def get_qemu_emulator(self):
"""
- Get the qemu emulator based on the crb.
+ Get the qemu emulator based on the node.
"""
arch = self.host_session.send_expect("uname -m", "# ")
return "/usr/bin/qemu-system-" + arch
@@ -271,7 +271,7 @@ class LibvirtKvm(VirtBase):
out = self.host_session.send_expect("ls %s" % qemu_emulator_path, "# ")
if "No such file or directory" in out:
self.host_logger.error(
- "No emulator [ %s ] on the DUT" % (qemu_emulator_path)
+ "No emulator [ %s ] on the SUT" % (qemu_emulator_path)
)
return None
out = self.host_session.send_expect(
@@ -279,7 +279,7 @@ class LibvirtKvm(VirtBase):
)
if out != "0":
self.host_logger.error(
- "Emulator [ %s ] " % qemu_emulator_path + "not executable on the DUT"
+ "Emulator [ %s ] " % qemu_emulator_path + "not executable on the SUT"
)
return None
self.qemu_emulator = qemu_emulator_path
@@ -367,7 +367,7 @@ class LibvirtKvm(VirtBase):
if displayNum
else self.virt_pool.alloc_port(self.vm_name, port_type="display")
)
- ip = self.host_dut.get_ip_address()
+ ip = self.host_sut.get_ip_address()
# set main block
graphics = {
"type": "vnc",
@@ -462,7 +462,7 @@ class LibvirtKvm(VirtBase):
qemu_ifup = self.QEMU_IFUP % {"switch": switch}
file_name = os.path.basename(self.QEMU_IFUP_PATH)
tmp_file_path = "/tmp/%s" % file_name
- self.host_dut.create_file(qemu_ifup, tmp_file_path)
+ self.host_sut.create_file(qemu_ifup, tmp_file_path)
self.host_session.send_expect(
"mv -f ~/%s %s" % (file_name, self.QEMU_IFUP_PATH), "# "
)
@@ -687,8 +687,8 @@ class LibvirtKvm(VirtBase):
port = self.virt_pool.alloc_port(self.vm_name)
if port is None:
return
- dut_ip = self.host_dut.crb["IP"]
- self.vm_ip = "%s:%d" % (dut_ip, port)
+ sut_ip = self.host_sut.node["IP"]
+ self.vm_ip = "%s:%d" % (sut_ip, port)
qemu = ET.SubElement(self.domain, "qemu:commandline")
ET.SubElement(qemu, "qemu:arg", {"value": "-net"})
@@ -711,7 +711,7 @@ class LibvirtKvm(VirtBase):
ET.SubElement(
qemu,
"qemu:arg",
- {"value": "user,hostfwd=" "tcp:%s:%d-:22" % (dut_ip, port)},
+ {"value": "user,hostfwd=" "tcp:%s:%d-:22" % (sut_ip, port)},
)
def __add_vm_net_tap(self, **options):
@@ -787,7 +787,7 @@ class LibvirtKvm(VirtBase):
if not self.qga_sock_path:
self.host_logger.info(
"No QGA service between host [ %s ] and guest [ %s ]"
- % (self.host_dut.Name, self.vm_name)
+ % (self.host_sut.Name, self.vm_name)
)
return None
diff --git a/framework/rst.py b/framework/rst.py
index 61a28743..da17efcd 100644
--- a/framework/rst.py
+++ b/framework/rst.py
@@ -31,12 +31,12 @@ path2Result = "output"
class RstReport(object):
- def __init__(self, crbName, target, nic, suite, perf=False):
+ def __init__(self, node_name, target, nic, suite, perf=False):
"""
copy desc from #Name#_test_plan.rst to TestResult_#Name#.rst
"""
try:
- path = [path2Result, crbName, target, nic]
+ path = [path2Result, node_name, target, nic]
# ensure the level folder exist
for node in range(0, len(path)):
if not os.path.exists("/".join(path[: node + 1])):
@@ -68,8 +68,8 @@ class RstReport(object):
except Exception as e:
raise VerifyFailure("RST Error: " + str(e))
- def clear_all_rst(self, crbName, target):
- path = [path2Result, crbName, target]
+ def clear_all_rst(self, nodeName, target):
+ path = [path2Result, nodeName, target]
shutil.rmtree("/".join(path), True)
def write_title(self, text):
diff --git a/framework/packet.py b/framework/scapy_packet_builder.py
similarity index 85%
rename from framework/packet.py
rename to framework/scapy_packet_builder.py
index cd478bef..522239ed 100644
--- a/framework/packet.py
+++ b/framework/scapy_packet_builder.py
@@ -3,7 +3,7 @@
#
"""
-Generic packet create, transmit and analyze module
+Generic module to create, transmit and analyze traffic
Base on scapy(python program for packet manipulation)
"""
import os
@@ -33,7 +33,7 @@ from .utils import convert_int2ip, convert_ip2int, get_module_path
# load extension layers
exec_file = os.path.realpath(__file__)
-DTS_PATH = exec_file.replace("/framework/packet.py", "")
+DTS_PATH = exec_file.replace("/framework/scapy_packet_builder.py", "")
# exec_file might be .pyc file, if so, remove 'c'.
TMP_PATH = (
@@ -69,9 +69,6 @@ def get_scapy_module_impcmd():
return ";".join(cmd_li)
-# packet generator type should be configured later
-PACKETGEN = "scapy"
-
LayersTypes = {
"L2": ["ether", "vlan", "1588", "arp", "lldp", "mpls", "nsh"],
# ipv4_ext_unknown, ipv6_ext_unknown
@@ -103,10 +100,6 @@ LayersTypes = {
# Saved background sniff process id
SNIFF_PIDS = {}
-# Saved packet generator process id
-# used in pktgen or tgen
-PKTGEN_PIDS = {}
-
# default filter for LLDP packet
LLDP_FILTER = {"layer": "ether", "config": {"type": "not lldp"}}
@@ -119,7 +112,7 @@ def write_raw_pkt(pkt_str, file_name):
w.close()
-class scapy(object):
+class ScapyPacketUtil(object):
SCAPY_LAYERS = {
"ether": Ether(dst="ff:ff:ff:ff:ff:ff"),
"vlan": Dot1Q(),
@@ -406,7 +399,7 @@ class scapy(object):
pkt_layer.ttl = ttl
-class Packet(object):
+class ScapyPacketBuilder(object):
"""
Module for config/create packet
Based on scapy module
@@ -437,36 +430,36 @@ class Packet(object):
pkt_type: description of packet type
defined in def_packet
args: specify a packet with a string explicitly, will ignore options
- options: special option for Packet module
+ options: special options for the ScapyPacketBuilder module
pkt_len: length of network packet
ran_payload: whether payload of packet is random
pkt_file:
- pkt_gen: packet generator type
+ tg_type: traffic generator type
now only support scapy
"""
self.pkt_opts = options
self.pkt_layers = []
- if "pkt_gen" in list(self.pkt_opts.keys()):
- if self.pkt_opts["pkt_gen"] == "scapy":
- self.pktgen = scapy()
+ if "tg_type" in list(self.pkt_opts.keys()):
+ if self.pkt_opts["tg_type"] == "scapy":
+ self.scapy_pkt_util = ScapyPacketUtil()
else:
- print("Not support other pktgen yet!!!")
+ print("Not support other traffic generators yet!!!")
else:
- self.pktgen = scapy()
+ self.scapy_pkt_util = ScapyPacketUtil()
if pkt_str is not None and type(pkt_str) == str:
self._scapy_str_to_pkt(pkt_str)
elif len(options) != 0:
self._add_pkt(self.pkt_opts)
- if self.pktgen.pkt is not None:
- self.pktgen.append_pkts()
+ if self.scapy_pkt_util.pkt is not None:
+ self.scapy_pkt_util.append_pkts()
def __len__(self):
- return len(self.pktgen.pkts)
+ return len(self.scapy_pkt_util.pkts)
def __getitem__(self, item):
- return self.pktgen.pkts[item]
+ return self.scapy_pkt_util.pkts[item]
def _add_pkt(self, options):
"""
@@ -499,7 +492,7 @@ class Packet(object):
self.config_def_layers()
# handle packet options
- payload_len = self.pkt_len - len(self.pktgen.pkt) - 4
+ payload_len = self.pkt_len - len(self.scapy_pkt_util.pkt) - 4
# if raw data has not been configured and payload should configured
if hasattr(self, "configured_layer_raw") is False and self.pkt_cfgload is True:
@@ -524,7 +517,7 @@ class Packet(object):
layer_li = [re.sub("\(.*?\)", "", i) for i in scapy_str.split("/")]
self.pkt_type = "_".join(layer_li)
self._load_pkt_layers()
- self.pktgen.assign_pkt(scapy_str)
+ self.scapy_pkt_util.assign_pkt(scapy_str)
def append_pkt(self, args=None, **kwargs):
"""
@@ -539,27 +532,27 @@ class Packet(object):
if hasattr(self, "configured_layer_raw"):
delattr(self, "configured_layer_raw")
self._add_pkt(kwargs)
- self.pktgen.append_pkts()
+ self.scapy_pkt_util.append_pkts()
def update_pkt_str(self, pkt):
self._scapy_str_to_pkt(pkt)
- self.pktgen.append_pkts()
+ self.scapy_pkt_util.append_pkts()
def update_pkt_dict(self, pkt):
self.pkt_opts = pkt
if hasattr(self, "configured_layer_raw"):
delattr(self, "configured_layer_raw")
self._add_pkt(pkt)
- self.pktgen.append_pkts()
+ self.scapy_pkt_util.append_pkts()
def update_pkt(self, pkts):
"""
- update pkts to packet object
+ update pkts in the ScapyPacketBuilder object
:param pkts: pkts to update
:type str|dict|list
:return: None
"""
- self.pktgen = scapy()
+ self.scapy_pkt_util = ScapyPacketUtil()
self.pkt_layers = []
if isinstance(pkts, str):
self.update_pkt_str(pkts)
@@ -625,7 +618,7 @@ class Packet(object):
self.pkt_type = random.choice(random_type)
self.pkt_layers = self.def_packet[self.pkt_type]["layers"]
self.check_layer_config()
- self.pktgen.add_layers(self.pkt_layers)
+ self.scapy_pkt_util.add_layers(self.pkt_layers)
# hardcode src/dst port for some protocol may cause issue
if "TCP" in self.pkt_type:
self.config_layer("tcp", {"src": 65535, "dst": 65535})
@@ -661,73 +654,73 @@ class Packet(object):
for _ in range(payload_len):
payload.append("%02x" % random.randrange(0, 255))
self.config_layer("raw", config={"payload": payload})
- self.pktgen.append_pkts()
+ self.scapy_pkt_util.append_pkts()
- def save_pcapfile(self, crb=None, filename="saved_pkts.pcap"):
+ def save_pcapfile(self, node=None, filename="saved_pkts.pcap"):
"""
- :param crb: session or crb object
+ :param node: session or node object
:param filename: location and name for packets to be saved
:return: None
"""
- # save pkts to pcap file to local path, then copy to remote tester tmp directory,
- if crb:
- trans_path = crb.tmp_file
+ # save pkts to a pcap file in the local path, then copy it to the remote TG tmp directory
+ if node:
+ trans_path = node.tmp_file
file_name = filename
if os.path.isabs(filename): # check if the given filename with a abs path
file_dir = os.path.dirname(filename)
- out = crb.send_expect("ls -d %s" % file_dir, "# ", verify=True)
+ out = node.send_expect("ls -d %s" % file_dir, "# ", verify=True)
if not isinstance(out, str):
- raise Exception("%s may not existed on %s" % (file_dir, crb.name))
- wrpcap(filename, self.pktgen.pkts)
+ raise Exception("%s may not existed on %s" % (file_dir, node.name))
+ wrpcap(filename, self.scapy_pkt_util.pkts)
trans_path = os.path.abspath(filename)
file_name = filename.split(os.path.sep)[-1]
# write packets to local tmp path $dts/ouput/tmp/pcap/
- wrpcap(TMP_PATH + file_name, self.pktgen.pkts)
- # copy to remote tester tmp path /tmp/tester
- crb.session.copy_file_to(TMP_PATH + file_name, trans_path)
+ wrpcap(TMP_PATH + file_name, self.scapy_pkt_util.pkts)
+ # copy to remote TG tmp path /tmp/tg
+ node.session.copy_file_to(TMP_PATH + file_name, trans_path)
else:
- wrpcap(filename, self.pktgen.pkts)
+ wrpcap(filename, self.scapy_pkt_util.pkts)
- def read_pcapfile(self, filename, crb=None):
+ def read_pcapfile(self, filename, node=None):
"""
:param filename: packet to be read from
- :param crb: session or crb object
+ :param node: session or node object
:return: scapy type packet
"""
# read pcap file from local or remote, then append to pkts list
- # if crb, read pakcet from remote server, else read from local location
- if crb:
- out = crb.send_expect("ls -d %s" % filename, "# ", verify=True)
+ # if node is given, read the packet from the remote server, else read from the local location
+ if node:
+ out = node.send_expect("ls -d %s" % filename, "# ", verify=True)
if not isinstance(out, str):
- raise Exception("%s may not existed on %s" % (filename, crb.name))
- crb.session.copy_file_from(filename, TMP_PATH)
+ raise Exception("%s may not existed on %s" % (filename, node.name))
+ node.session.copy_file_from(filename, TMP_PATH)
p = rdpcap(TMP_PATH + filename.split(os.path.sep)[-1])
else:
p = rdpcap(filename)
if len(p) == 0:
return None
- self.pktgen.assign_pkt(p[-1])
+ self.scapy_pkt_util.assign_pkt(p[-1])
for i in p:
- self.pktgen.pkts.append(i)
+ self.scapy_pkt_util.pkts.append(i)
return p
- def send_pkt_bg_with_pcapfile(self, crb, tx_port="", count=1, loop=0, inter=0):
+ def send_pkt_bg_with_pcapfile(self, node, tx_port="", count=1, loop=0, inter=0):
"""
send packet background with a pcap file, got an advantage in sending a large number of packets
- :param crb: session or crb object
+ :param node: session or node object
:param tx_port: ether to send packet
:param count: send times
:param loop: send packet in a loop
:param inter: interval time per packet
:return: send session
"""
- if crb.name != "tester":
- raise Exception("crb should be tester")
- wrpcap("_", self.pktgen.pkts)
+ if node.name != "tg":
+ raise Exception("node should be tg")
+ wrpcap("_", self.scapy_pkt_util.pkts)
file_path = "/tmp/%s.pcap" % tx_port
- scapy_session_bg = crb.prepare_scapy_env()
+ scapy_session_bg = node.prepare_scapy_env()
scapy_session_bg.copy_file_to("_", file_path)
scapy_session_bg.send_expect('pkts = rdpcap("%s")' % file_path, ">>> ")
scapy_session_bg.send_command(
@@ -756,9 +749,9 @@ class Packet(object):
pkt_str = re.sub(r"NVGRE\(\)|NVGRE\(TNI=\s*(0x)*\d*\)", nvgre, pkt_str)
return pkt_str
- def gernerator_pkt_str(self):
+ def generator_pkt_str(self):
pkt_str_list = []
- for p in self.pktgen.pkts:
+ for p in self.scapy_pkt_util.pkts:
if not isinstance(p, str):
p_str = p.command()
else:
@@ -769,26 +762,26 @@ class Packet(object):
pkt_str_list.append(p_str)
return "[" + ",".join(pkt_str_list) + "]"
- def send_pkt(self, crb, tx_port="", count=1, interval=0, timeout=120):
- p_str = self.gernerator_pkt_str()
+ def send_pkt(self, node, tx_port="", count=1, interval=0, timeout=120):
+ p_str = self.generator_pkt_str()
pkts_str = self._recompose_pkts_str(pkts_str=p_str)
cmd = (
"sendp("
+ pkts_str
+ f',iface="{tx_port}",count={count},inter={interval},verbose=False)'
)
- if crb.name == "tester":
- crb.scapy_session.send_expect(cmd, ">>> ", timeout=timeout)
- elif crb.name.startswith("tester_scapy"):
- crb.send_expect(cmd, ">>> ", timeout=timeout)
+ if node.name == "tg":
+ node.scapy_session.send_expect(cmd, ">>> ", timeout=timeout)
+ elif node.name.startswith("tg_scapy"):
+ node.send_expect(cmd, ">>> ", timeout=timeout)
else:
- raise Exception("crb should be tester's session and initialized")
+ raise Exception("node should be tg's session and initialized")
- def send_pkt_bg(self, crb, tx_port="", count=-1, interval=0, loop=1):
- if crb.name != "tester":
- raise Exception("crb should be tester")
- scapy_session_bg = crb.prepare_scapy_env()
- p_str = self.gernerator_pkt_str()
+ def send_pkt_bg(self, node, tx_port="", count=-1, interval=0, loop=1):
+ if node.name != "tg":
+ raise Exception("node should be tg")
+ scapy_session_bg = node.prepare_scapy_env()
+ p_str = self.generator_pkt_str()
pkts_str = self._recompose_pkts_str(pkts_str=p_str)
cmd = (
"sendp("
@@ -842,9 +835,9 @@ class Packet(object):
self.pkt_layers.remove(l_type)
print("INVAILD LAYER TYPE [%s]" % l_type.upper())
- self.pktgen.add_layers(self.pkt_layers)
+ self.scapy_pkt_util.add_layers(self.pkt_layers)
if layers:
- self.pktgen.update_pkts()
+ self.scapy_pkt_util.update_pkts()
def _load_pkt_layers(self):
name2type = {
@@ -935,10 +928,10 @@ class Packet(object):
if "inner" in layer:
layer = layer[6:]
- if isinstance(self.pktgen.pkt, str):
+ if isinstance(self.scapy_pkt_util.pkt, str):
raise Exception("string type packet not support config layer")
- pkt_layer = self.pktgen.pkt.getlayer(idx)
- layer_conf = getattr(self.pktgen, layer)
+ pkt_layer = self.scapy_pkt_util.pkt.getlayer(idx)
+ layer_conf = getattr(self.scapy_pkt_util, layer)
setattr(self, "configured_layer_%s" % layer, True)
layer_conf(pkt_layer, **config)
@@ -966,16 +959,16 @@ class Packet(object):
return strip_element(element, p_index)
def strip_element_layer2(self, element, p_index=0):
- return self.pktgen.strip_layer2(element, p_index)
+ return self.scapy_pkt_util.strip_layer2(element, p_index)
def strip_element_layer3(self, element, p_index=0):
- return self.pktgen.strip_layer3(element, p_index)
+ return self.scapy_pkt_util.strip_layer3(element, p_index)
def strip_element_vlan(self, element, p_index=0):
- return self.pktgen.strip_vlan(element, p_index)
+ return self.scapy_pkt_util.strip_vlan(element, p_index)
def strip_element_layer4(self, element, p_index=0):
- return self.pktgen.strip_layer4(element, p_index)
+ return self.scapy_pkt_util.strip_layer4(element, p_index)
def IncreaseIP(addr):
@@ -1079,22 +1072,22 @@ def get_filter_cmd(filters=[]):
return ""
-def start_tcpdump(crb, intf, count=0, filters=None, lldp_forbid=True):
+def start_tcpdump(node, intf, count=0, filters=None, lldp_forbid=True):
"""
sniff all packets from certain port
"""
filters = [] if filters is None else filters
- out = crb.send_expect("ls -d %s" % crb.tmp_file, "# ", verify=True)
+ out = node.send_expect("ls -d %s" % node.tmp_file, "# ", verify=True)
if out == 2:
- crb.send_expect("mkdir -p %s" % crb.tmp_file, "# ")
- filename = "{}sniff_{}.pcap".format(crb.tmp_file, intf)
+ node.send_expect("mkdir -p %s" % node.tmp_file, "# ")
+ filename = "{}sniff_{}.pcap".format(node.tmp_file, intf)
# delete old pcap file
- crb.send_expect("rm -rf %s" % filename, "# ")
+ node.send_expect("rm -rf %s" % filename, "# ")
param = ""
direct_param = r"(\s+)\[ (\S+) in\|out\|inout \]"
- tcpdump_session = crb.create_session("tcpdump_session" + str(time.time()))
- setattr(tcpdump_session, "tmp_file", crb.tmp_file)
+ tcpdump_session = node.create_session("tcpdump_session" + str(time.time()))
+ setattr(tcpdump_session, "tmp_file", node.tmp_file)
tcpdump_help = tcpdump_session.send_command("tcpdump -h")
for line in tcpdump_help.split("\n"):
@@ -1147,10 +1140,10 @@ def stop_and_load_tcpdump_packets(index="", timeout=1):
pipe.get_session_before(timeout)
pipe.send_command("^C")
pipe.copy_file_from(filename, TMP_PATH)
- p = Packet()
- p.read_pcapfile(TMP_PATH + filename.split(os.sep)[-1])
+ scapy_pkt_builder = ScapyPacketBuilder()
+ scapy_pkt_builder.read_pcapfile(TMP_PATH + filename.split(os.sep)[-1])
pipe.close()
- return p
+ return scapy_pkt_builder
def compare_pktload(pkt1=None, pkt2=None, layer="L2"):
@@ -1174,7 +1167,7 @@ def compare_pktload(pkt1=None, pkt2=None, layer="L2"):
return False
-def strip_pktload(pkt=None, layer="L2", p_index=0):
+def strip_pktload(scapy_pkt_builder=None, layer="L2", p_index=0):
if layer == "L2":
l_idx = 0
elif layer == "L3":
@@ -1184,7 +1177,7 @@ def strip_pktload(pkt=None, layer="L2", p_index=0):
else:
l_idx = 0
try:
- load = hexstr(str(pkt.pktgen.pkts[p_index].getlayer(l_idx)), onlyhex=1)
+ load = hexstr(str(scapy_pkt_builder.scapy_pkt_util.pkts[p_index].getlayer(l_idx)), onlyhex=1)
except:
# return pass when scapy failed to extract packet
load = ""
@@ -1195,31 +1188,31 @@ def strip_pktload(pkt=None, layer="L2", p_index=0):
###############################################################################
###############################################################################
if __name__ == "__main__":
- pkt = Packet("Ether(type=0x894f)/NSH(Len=0x6,NextProto=0x0,NSP=0x000002,NSI=0xff)")
- sendp(pkt, iface="lo")
- pkt.append_pkt(pkt_type="IPv6_TCP", pkt_len=100)
- pkt.append_pkt(pkt_type="TCP", pkt_len=100)
- pkt.config_layer("tcp", config={"flags": "A"})
- pkt.append_pkt(
+ scapy_pkt_builder = ScapyPacketBuilder("Ether(type=0x894f)/NSH(Len=0x6,NextProto=0x0,NSP=0x000002,NSI=0xff)")
+ sendp(scapy_pkt_builder, iface="lo")
+ scapy_pkt_builder.append_pkt(pkt_type="IPv6_TCP", pkt_len=100)
+ scapy_pkt_builder.append_pkt(pkt_type="TCP", pkt_len=100)
+ scapy_pkt_builder.config_layer("tcp", config={"flags": "A"})
+ scapy_pkt_builder.append_pkt(
"Ether(dst='11:22:33:44:55:11')/IP(dst='192.168.5.2')/TCP(flags=0)/Raw(load='bbbb')"
)
- pkt.generate_random_pkts(
+ scapy_pkt_builder.generate_random_pkts(
"11:22:33:44:55:55",
random_type=["TCP", "IPv6_TCP"],
random_payload=True,
pktnum=10,
)
- sendp(pkt, iface="lo")
+ sendp(scapy_pkt_builder, iface="lo")
- pkt = Packet(pkt_type="UDP", pkt_len=1500, ran_payload=True)
- sendp(pkt, iface="lo")
- pkt = Packet(pkt_type="IPv6_SCTP")
- sendp(pkt, iface="lo")
- pkt = Packet(pkt_type="VLAN_UDP")
- pkt.config_layer("vlan", {"vlan": 2})
- sendp(pkt, iface="lo")
+ scapy_pkt_builder = ScapyPacketBuilder(pkt_type="UDP", pkt_len=1500, ran_payload=True)
+ sendp(scapy_pkt_builder, iface="lo")
+ scapy_pkt_builder = ScapyPacketBuilder(pkt_type="IPv6_SCTP")
+ sendp(scapy_pkt_builder, iface="lo")
+ scapy_pkt_builder = ScapyPacketBuilder(pkt_type="VLAN_UDP")
+ scapy_pkt_builder.config_layer("vlan", {"vlan": 2})
+ sendp(scapy_pkt_builder, iface="lo")
- pkt.assign_layers(
+ scapy_pkt_builder.assign_layers(
[
"ether",
"vlan",
@@ -1232,15 +1225,15 @@ if __name__ == "__main__":
"raw",
]
)
- pkt.config_layer("ether", {"dst": "00:11:22:33:44:55"})
- pkt.config_layer("vlan", {"vlan": 2})
- pkt.config_layer("ipv4", {"dst": "1.1.1.1"})
- pkt.config_layer("udp", {"src": 4789, "dst": 4789, "chksum": 0x1111})
- pkt.config_layer("vxlan", {"vni": 2})
- pkt.config_layer("raw", {"payload": ["58"] * 18})
- sendp(pkt, iface="lo")
-
- pkt.assign_layers(
+ scapy_pkt_builder.config_layer("ether", {"dst": "00:11:22:33:44:55"})
+ scapy_pkt_builder.config_layer("vlan", {"vlan": 2})
+ scapy_pkt_builder.config_layer("ipv4", {"dst": "1.1.1.1"})
+ scapy_pkt_builder.config_layer("udp", {"src": 4789, "dst": 4789, "chksum": 0x1111})
+ scapy_pkt_builder.config_layer("vxlan", {"vni": 2})
+ scapy_pkt_builder.config_layer("raw", {"payload": ["58"] * 18})
+ sendp(scapy_pkt_builder, iface="lo")
+
+ scapy_pkt_builder.assign_layers(
[
"ether",
"vlan",
@@ -1254,7 +1247,7 @@ if __name__ == "__main__":
]
)
# config packet
- pkt.config_layers(
+ scapy_pkt_builder.config_layers(
[
("ether", {"dst": "00:11:22:33:44:55"}),
("ipv4", {"dst": "1.1.1.1"}),
@@ -1263,4 +1256,4 @@ if __name__ == "__main__":
]
)
- sendp(pkt, iface="lo")
+ sendp(scapy_pkt_builder, iface="lo")
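For reference, a minimal sketch of driving the renamed builder after this patch; it relies only on the constructor options, config_layer() and send_pkt() shown in the hunks above, while the import path, the interface name and the tg_node variable are illustrative assumptions, not part of the patch:

    # Illustrative sketch only; assumes framework/scapy_packet_builder.py exports
    # ScapyPacketBuilder as renamed above.
    from framework.scapy_packet_builder import ScapyPacketBuilder

    # "tg_type" replaces the old "pkt_gen" option; only scapy is supported.
    builder = ScapyPacketBuilder(pkt_type="VLAN_UDP", pkt_len=100, tg_type="scapy")
    builder.config_layer("vlan", {"vlan": 2})
    # send through the TG node's scapy session (tg_node and the interface name
    # are placeholders):
    # builder.send_pkt(tg_node, tx_port="enp1s0f0", count=10)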
diff --git a/framework/settings.py b/framework/settings.py
index 78b1250c..93ae66b4 100644
--- a/framework/settings.py
+++ b/framework/settings.py
@@ -174,7 +174,7 @@ SCAPY2IXIA = ["Ether", "Dot1Q", "IP", "IPv6", "TCP", "UDP", "SCTP"]
USERNAME = "root"
# A user used to test functionality for a non-root user
-UNPRIVILEGED_USERNAME = "dtsunprivilegedtester"
+UNPRIVILEGED_USERNAME = "dtsunprivilegedtg"
"""
Helpful header sizes.
@@ -203,12 +203,20 @@ TIMEOUT = 15
"""
Global macro for dts.
"""
-PKTGEN = "pktgen"
-PKTGEN_DPDK = "dpdk"
-PKTGEN_TREX = "trex"
-PKTGEN_IXIA = "ixia"
-PKTGEN_IXIA_NETWORK = "ixia_network"
-PKTGEN_GRP = frozenset([PKTGEN_DPDK, PKTGEN_TREX, PKTGEN_IXIA, PKTGEN_IXIA_NETWORK])
+PERF_TG_CONF_KEY = "perf_tg"
+TG_DPDK = "dpdk"
+TG_TREX = "trex"
+TG_IXEXPLORER = "ixexplorer"
+TG_IXNETWORK = "ixnetwork"
+PERF_TG_TYPES = frozenset([TG_DPDK, TG_TREX, TG_IXEXPLORER, TG_IXNETWORK])
+
+"""
+Transmit mode definitions for traffic generators.
+"""
+TRANSMIT_CONT = "continuous"
+TRANSMIT_M_BURST = "multi_burst"
+TRANSMIT_S_BURST = "single_burst"
+
"""
The log name seperater.
"""
@@ -245,8 +253,8 @@ DTS global error table
DTS_ERR_TBL = {
"GENERIC_ERR": 1,
"DPDK_BUILD_ERR": 2,
- "DUT_SETUP_ERR": 3,
- "TESTER_SETUP_ERR": 4,
+ "SUT_SETUP_ERR": 3,
+ "TG_SETUP_ERR": 4,
"SUITE_SETUP_ERR": 5,
"SUITE_EXECUTE_ERR": 6,
"PARALLEL_EXECUTE_ERR": 7,
@@ -274,8 +282,8 @@ def get_nic_driver(pci_id):
return driver
-def get_netdev(crb, pci):
- for port in crb.ports_info:
+def get_netdev(node, pci):
+ for port in node.ports_info:
if pci == port["pci"]:
return port["port"]
if "vfs_port" in list(port.keys()):
diff --git a/framework/ssh_connection.py b/framework/ssh_connection.py
index 903b00cd..be2f80af 100644
--- a/framework/ssh_connection.py
+++ b/framework/ssh_connection.py
@@ -18,8 +18,8 @@ class SSHConnection(object):
Implement send_expect/copy function upper SSHPexpect module.
"""
- def __init__(self, host, session_name, username, password="", dut_id=0):
- self.session = SSHPexpect(host, username, password, dut_id)
+ def __init__(self, host, session_name, username, password="", sut_id=0):
+ self.session = SSHPexpect(host, username, password, sut_id)
self.name = session_name
connection = {}
connection[self.name] = self.session
@@ -83,8 +83,8 @@ class SSHConnection(object):
return True
- def copy_file_from(self, src, dst=".", password="", crb_session=None):
- self.session.copy_file_from(src, dst, password, crb_session)
+ def copy_file_from(self, src, dst=".", password="", node_session=None):
+ self.session.copy_file_from(src, dst, password, node_session)
- def copy_file_to(self, src, dst="~/", password="", crb_session=None):
- self.session.copy_file_to(src, dst, password, crb_session)
+ def copy_file_to(self, src, dst="~/", password="", node_session=None):
+ self.session.copy_file_to(src, dst, password, node_session)
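For reference, a small sketch of the renamed SSHConnection keywords (sut_id, node_session); the host, credentials and file paths are placeholders, and only the signatures shown in this hunk are relied on:

    # Placeholder host/credentials; only the renamed keyword arguments matter here.
    from framework.ssh_connection import SSHConnection

    conn = SSHConnection("192.0.2.10", "sut_session", "root", password="***", sut_id=0)
    conn.copy_file_to("local.cfg", "/tmp/")        # node_session defaults to None
    conn.copy_file_from("/tmp/remote.log", ".")    # likewise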
diff --git a/framework/ssh_pexpect.py b/framework/ssh_pexpect.py
index 97406896..38c676f5 100644
--- a/framework/ssh_pexpect.py
+++ b/framework/ssh_pexpect.py
@@ -8,14 +8,14 @@ from .exception import SSHConnectionException, SSHSessionDeadException, TimeoutE
from .utils import GREEN, RED, parallel_lock
"""
-Module handle ssh sessions between tester and DUT.
+Module handles ssh sessions between TG and SUT.
Implements send_expect function to send command and get output data.
-Also supports transfer files to tester or DUT.
+Also supports transferring files to the TG or SUT.
"""
class SSHPexpect:
- def __init__(self, host, username, password, dut_id):
+ def __init__(self, host, username, password, sut_id):
self.magic_prompt = "MAGIC PROMPT"
self.logger = None
@@ -23,13 +23,13 @@ class SSHPexpect:
self.username = username
self.password = password
- self._connect_host(dut_id=dut_id)
+ self._connect_host(sut_id=sut_id)
@parallel_lock(num=8)
- def _connect_host(self, dut_id=0):
+ def _connect_host(self, sut_id=0):
"""
- Create connection to assigned crb, parameter dut_id will be used in
- parallel_lock thus can assure isolated locks for each crb.
+ Create a connection to the assigned node; the sut_id parameter is used in
+ parallel_lock to ensure isolated locks for each node.
Parallel ssh connections are limited to MaxStartups option in SSHD
configuration file. By default concurrent number is 10, so default
threads number is limited to 8 which less than 10. Lock number can
@@ -199,7 +199,7 @@ class SSHPexpect:
def isalive(self):
return self.session.isalive()
- def copy_file_from(self, src, dst=".", password="", crb_session=None):
+ def copy_file_from(self, src, dst=".", password="", node_session=None):
"""
Copies a file from a remote place into local.
"""
@@ -209,11 +209,11 @@ class SSHPexpect:
str(self.port), self.username, self.ip, src, dst
)
if password == "":
- self._spawn_scp(command, self.password, crb_session)
+ self._spawn_scp(command, self.password, node_session)
else:
- self._spawn_scp(command, password, crb_session)
+ self._spawn_scp(command, password, node_session)
- def copy_file_to(self, src, dst="~/", password="", crb_session=None):
+ def copy_file_to(self, src, dst="~/", password="", node_session=None):
"""
Sends a local file to a remote place.
"""
@@ -227,21 +227,21 @@ class SSHPexpect:
src, self.username, self.host, dst
)
if password == "":
- self._spawn_scp(command, self.password, crb_session)
+ self._spawn_scp(command, self.password, node_session)
else:
- self._spawn_scp(command, password, crb_session)
+ self._spawn_scp(command, password, node_session)
- def _spawn_scp(self, scp_cmd, password, crb_session):
+ def _spawn_scp(self, scp_cmd, password, node_session):
"""
Transfer a file with SCP
"""
self.logger.info(scp_cmd)
- # if crb_session is not None, copy file from/to crb env
- # if crb_session is None, copy file from/to current dts env
- if crb_session is not None:
- crb_session.session.clean_session()
- crb_session.session.__sendline(scp_cmd)
- p = crb_session.session.session
+ # if node_session is not None, copy file from/to node env
+ # if node_session is None, copy file from/to current dts env
+ if node_session is not None:
+ node_session.session.clean_session()
+ node_session.session.__sendline(scp_cmd)
+ p = node_session.session.session
else:
p = pexpect.spawn(scp_cmd)
time.sleep(0.5)
@@ -259,5 +259,5 @@ class SSHPexpect:
p.expect("Exit status 0", 60)
if i == 4:
self.logger.error("SCP TIMEOUT error %d" % i)
- if crb_session is None:
+ if node_session is None:
p.close()
diff --git a/framework/stats_reporter.py b/framework/stats_reporter.py
index e9e7173d..f954a7aa 100644
--- a/framework/stats_reporter.py
+++ b/framework/stats_reporter.py
@@ -28,25 +28,25 @@ class StatsReporter(object):
self.total += 1
def __count_stats(self):
- for dut in self.result.all_duts():
- for target in self.result.all_targets(dut):
- for suite in self.result.all_test_suites(dut, target):
- for case in self.result.all_test_cases(dut, target, suite):
- test_result = self.result.result_for(dut, target, suite, case)
+ for sut in self.result.all_suts():
+ for target in self.result.all_targets(sut):
+ for suite in self.result.all_test_suites(sut, target):
+ for case in self.result.all_test_cases(sut, target, suite):
+ test_result = self.result.result_for(sut, target, suite, case)
if len(test_result):
self.__add_stat(test_result)
def __write_stats(self):
- duts = self.result.all_duts()
- if len(duts) == 1:
+ sut_nodes = self.result.all_suts()
+ if len(sut_nodes) == 1:
self.stats_file.write(
- "dpdk_version = {}\n".format(self.result.current_dpdk_version(duts[0]))
+ "dpdk_version = {}\n".format(self.result.current_dpdk_version(sut_nodes[0]))
)
else:
- for dut in duts:
- dpdk_version = self.result.current_dpdk_version(dut)
+ for sut in sut_nodes:
+ dpdk_version = self.result.current_dpdk_version(sut)
self.stats_file.write(
- "{}.dpdk_version = {}\n".format(dut, dpdk_version)
+ "{}.dpdk_version = {}\n".format(sut, dpdk_version)
)
self.__count_stats()
self.stats_file.write("Passed = %d\n" % self.passed)
diff --git a/framework/dut.py b/framework/sut_node.py
similarity index 89%
rename from framework/dut.py
rename to framework/sut_node.py
index b8c01f47..98c584dc 100644
--- a/framework/dut.py
+++ b/framework/sut_node.py
@@ -13,8 +13,8 @@ import framework.settings as settings
from nics.net_device import GetNicObj
from .config import AppNameConf, PortConf
-from .crb import Crb
from .exception import ParameterInvalidException
+from .node import Node
from .settings import LOG_NAME_SEP, NICS
from .ssh_connection import SSHConnection
from .test_result import ResultTable
@@ -22,31 +22,28 @@ from .utils import RED, remove_old_rsa_key
from .virt_resource import VirtResource
-class Dut(Crb):
+class SutNode(Node):
"""
- A connection to the CRB under test.
- This class sends commands to the CRB and validates the responses. It is
- implemented using either ssh for linuxapp or the terminal server for
- baremetal.
- All operations are in fact delegated to an instance of either CRBLinuxApp
- or CRBBareMetal.
+ A class for managing connections to the System under test, providing
+ methods that retrieve the necessary information about the node (such as
+ cpu, memory and NIC details) and configuration capabilities.
"""
- PORT_MAP_CACHE_KEY = "dut_port_map"
- PORT_INFO_CACHE_KEY = "dut_port_info"
- NUMBER_CORES_CACHE_KEY = "dut_number_cores"
- CORE_LIST_CACHE_KEY = "dut_core_list"
- PCI_DEV_CACHE_KEY = "dut_pci_dev_info"
+ PORT_MAP_CACHE_KEY = "sut_port_map"
+ PORT_INFO_CACHE_KEY = "sut_port_info"
+ NUMBER_CORES_CACHE_KEY = "sut_number_cores"
+ CORE_LIST_CACHE_KEY = "sut_core_list"
+ PCI_DEV_CACHE_KEY = "sut_pci_dev_info"
- def __init__(self, crb, serializer, dut_id=0, name=None, alt_session=True):
+ def __init__(self, node, serializer, sut_id=0, name=None, alt_session=True):
if not name:
- name = "dut" + LOG_NAME_SEP + "%s" % crb["My IP"]
+ name = "sut" + LOG_NAME_SEP + "%s" % node["My IP"]
self.NAME = name
- super(Dut, self).__init__(crb, serializer, dut_id, name, alt_session)
+ super(SutNode, self).__init__(node, serializer, sut_id, name, alt_session)
self.host_init_flag = False
self.number_of_cores = 0
- self.tester = None
+ self.tg_node = None
self.cores = []
self.architecture = None
self.conf = PortConf()
@@ -63,11 +60,11 @@ class Dut(Crb):
self.dpdk_version = ""
self.nic = None
- def filter_cores_from_crb_cfg(self):
- # get core list from crbs.cfg
+ def filter_cores_from_node_cfg(self):
+ # get core list from nodes.cfg
core_list = []
all_core_list = [str(core["core"]) for core in self.cores]
- core_list_str = self.crb["dut_cores"]
+ core_list_str = self.node["sut_cores"]
if core_list_str == "":
core_list = all_core_list
split_by_comma = core_list_str.split(",")
@@ -171,7 +168,7 @@ class Dut(Crb):
}
eal_parameter_creator = _EalParameter(
- dut=self, fixed_prefix=fixed_prefix, socket=socket, **config
+ sut_node=self, fixed_prefix=fixed_prefix, socket=socket, **config
)
eal_str = eal_parameter_creator.make_eal_param()
@@ -205,7 +202,7 @@ class Dut(Crb):
def new_session(self, suite=""):
"""
- Create new session for dut instance. Session name will be unique.
+ Create new session for SUT instance. Session name will be unique.
"""
if len(suite):
session_name = self.NAME + "_" + suite
@@ -224,13 +221,13 @@ class Dut(Crb):
def close_session(self, session):
"""
- close new session in dut instance
+ close new session in SUT instance
"""
self.destroy_session(session)
def set_nic_type(self, nic_type):
"""
- Set CRB NICS ready to validated.
+ Set node NICs ready to be validated.
"""
self.nic_type = nic_type
if "cfg" in nic_type:
@@ -239,7 +236,7 @@ class Dut(Crb):
def set_toolchain(self, target):
"""
This looks at the current target and instantiates an attribute to
- be either a CRBLinuxApp or CRBBareMetal object. These latter two
+ be either a NodeLinuxApp or NodeBareMetal object. These latter two
classes are private and should not be used directly by client code.
"""
self.kill_all()
@@ -276,28 +273,28 @@ class Dut(Crb):
def get_ip_address(self):
"""
- Get DUT's ip address.
+ Get SUT's ip address.
"""
- return self.crb["IP"]
+ return self.node["IP"]
def get_password(self):
"""
- Get DUT's login password.
+ Get SUT's login password.
"""
- return self.crb["pass"]
+ return self.node["pass"]
def get_username(self):
"""
- Get DUT's login username.
+ Get SUT's login username.
"""
- return self.crb["user"]
+ return self.node["user"]
- def dut_prerequisites(self):
+ def sut_prerequisites(self):
"""
Prerequest function should be called before execute any test case.
- Will call function to scan all lcore's information which on DUT.
+ Will call a function to scan all lcore information on the SUT.
Then call pci scan function to collect nic device information.
- At last setup DUT' environment for validation.
+ At last, set up the SUT's environment for validation.
"""
out = self.send_expect("cd %s" % self.base_dir, "# ")
assert "No such file or directory" not in out, "Can't switch to dpdk folder!!!"
@@ -313,13 +310,13 @@ class Dut(Crb):
self.send_expect("alias sed=gsed", "# ")
self.init_core_list()
- self.filter_cores_from_crb_cfg()
+ self.filter_cores_from_node_cfg()
self.pci_devices_information()
# make sure ipv6 enable before scan
- self.enable_tester_ipv6()
+ self.enable_tg_ipv6()
# scan ports before restore interface
self.scan_ports()
- # restore dut ports to kernel
+ # restore SUT ports to kernel
self.restore_interfaces()
# rescan ports after interface up
self.rescan_ports()
@@ -328,8 +325,8 @@ class Dut(Crb):
self.mount_procfs()
# auto detect network topology
self.map_available_ports()
- # disable tester port ipv6
- self.disable_tester_ipv6()
+ # disable TG port ipv6
+ self.disable_tg_ipv6()
self.get_nic_configurations()
# print latest ports_info
@@ -465,7 +462,7 @@ class Dut(Crb):
def setup_memory(self, hugepages=-1):
"""
- Setup hugepage on DUT.
+ Setup hugepage on SUT.
"""
try:
function_name = "setup_memory_%s" % self.get_os_type()
@@ -552,7 +549,7 @@ class Dut(Crb):
def is_ssh_session_port(self, pci_bus):
"""
- Check if the pci device is the dut SSH session port.
+ Check if the pci device is the SUT SSH session port.
"""
port = None
for port_info in self.ports_info:
@@ -683,7 +680,7 @@ class Dut(Crb):
def get_ports(self, nic_type="any", perf=None, socket=None):
"""
- Return DUT port list with the filter of NIC type, whether run IXIA
+ Return SUT port list with the filter of NIC type, whether run IXIA
performance test, whether request specified socket.
"""
ports = []
@@ -716,7 +713,7 @@ class Dut(Crb):
or socket == port_info["numa"]
):
# port has link,
- if self.tester.get_local_port(portid) != -1:
+ if self.tg_node.get_local_port(portid) != -1:
ports.append(portid)
return ports
@@ -771,7 +768,7 @@ class Dut(Crb):
def get_peer_pci(self, port_num):
"""
- return the peer pci address of dut port
+ return the peer pci address of SUT port
"""
if "peer" not in self.ports_info[port_num]:
return None
@@ -780,13 +777,13 @@ class Dut(Crb):
def get_mac_address(self, port_num):
"""
- return the port mac on dut
+ return the port mac on SUT
"""
return self.ports_info[port_num]["mac"]
def get_ipv6_address(self, port_num):
"""
- return the IPv6 address on dut
+ return the IPv6 address on SUT
"""
return self.ports_info[port_num]["ipv6"]
@@ -814,7 +811,7 @@ class Dut(Crb):
result_table.table_print()
def get_memory_channels(self):
- n = self.crb["memory channels"]
+ n = self.node["memory channels"]
if n is not None and n > 0:
return n
else:
@@ -898,7 +895,7 @@ class Dut(Crb):
port = port_info["port"]
intf = port.get_interface_name()
if "No such file" in intf:
- self.logger.info("DUT: [%s] %s" % (port_info["pci"], unknow_interface))
+ self.logger.info("SUT: [%s] %s" % (port_info["pci"], unknow_interface))
continue
self.send_expect("ifconfig %s up" % intf, "# ")
time.sleep(5)
@@ -942,14 +939,14 @@ class Dut(Crb):
def scan_ports_cached(self):
"""
- Scan cached ports, instantiate tester port
+ Scan cached ports, instantiate TG port
"""
scan_ports_cached = getattr(self, "scan_ports_cached_%s" % self.get_os_type())
return scan_ports_cached()
def scan_ports_cached_linux(self):
"""
- Scan Linux ports and instantiate tester port
+ Scan Linux ports and instantiate TG port
"""
if self.ports_info is None:
return
@@ -964,7 +961,7 @@ class Dut(Crb):
port_info["port"] = port
self.logger.info(
- "DUT cached: [%s %s] %s"
+ "SUT cached: [%s %s] %s"
% (port_info["pci"], port_info["type"], port_info["intf"])
)
@@ -988,7 +985,7 @@ class Dut(Crb):
for (pci_bus, pci_id) in self.pci_devices_info:
if self.check_ports_available(pci_bus, pci_id) is False:
- self.logger.info("DUT: [%s %s] %s" % (pci_bus, pci_id, skipped))
+ self.logger.info("SUT: [%s %s] %s" % (pci_bus, pci_id, skipped))
continue
addr_array = pci_bus.split(":")
@@ -999,12 +996,12 @@ class Dut(Crb):
port = GetNicObj(self, domain_id, bus_id, devfun_id)
intf = port.get_interface_name()
if "No such file" in intf:
- self.logger.info("DUT: [%s] %s" % (pci_bus, unknow_interface))
+ self.logger.info("SUT: [%s] %s" % (pci_bus, unknow_interface))
continue
macaddr = port.get_mac_addr()
if "No such file" in intf:
- self.logger.info("DUT: [%s] %s" % (pci_bus, unknow_interface))
+ self.logger.info("SUT: [%s] %s" % (pci_bus, unknow_interface))
continue
numa = port.socket
@@ -1049,7 +1046,7 @@ class Dut(Crb):
for (pci_bus, pci_id) in self.pci_devices_info:
if not settings.accepted_nic(pci_id):
- self.logger.info("DUT: [%s %s] %s" % (pci_bus, pci_id, skipped))
+ self.logger.info("SUT: [%s %s] %s" % (pci_bus, pci_id, skipped))
continue
addr_array = pci_bus.split(":")
domain_id = addr_array[0]
@@ -1069,7 +1066,7 @@ class Dut(Crb):
self.logger.warning("NUMA not available on FreeBSD")
- self.logger.info("DUT: [%s %s] %s %s" % (pci_bus, pci_id, intf, ipv6))
+ self.logger.info("SUT: [%s %s] %s %s" % (pci_bus, pci_id, intf, ipv6))
# convert bsd format to linux format
pci_split = pci_bus.split(":")
@@ -1099,8 +1096,8 @@ class Dut(Crb):
Setup current virtualization hypervisor type and remove elder VM ssh keys
"""
self.virttype = virttype
- # remove VM rsa keys from tester
- remove_old_rsa_key(self.tester, self.crb["My IP"])
+ # remove VM rsa keys from TG
+ remove_old_rsa_key(self.tg_node, self.node["My IP"])
def generate_sriov_vfs_by_port(self, port_id, vf_num, driver="default"):
"""
@@ -1205,7 +1202,7 @@ class Dut(Crb):
self.map_available_ports_uncached()
self.serializer.save(self.PORT_MAP_CACHE_KEY, self.ports_map)
- self.logger.warning("DUT PORT MAP: " + str(self.ports_map))
+ self.logger.warning("SUT PORT MAP: " + str(self.ports_map))
def map_available_ports_uncached(self):
"""
@@ -1218,103 +1215,103 @@ class Dut(Crb):
remove = []
self.ports_map = [-1] * nrPorts
- hits = [False] * len(self.tester.ports_info)
+ hits = [False] * len(self.tg_node.ports_info)
- for dutPort in range(nrPorts):
- peer = self.get_peer_pci(dutPort)
- dutpci = self.ports_info[dutPort]["pci"]
+ for sutPort in range(nrPorts):
+ peer = self.get_peer_pci(sutPort)
+ sutpci = self.ports_info[sutPort]["pci"]
if peer is not None:
- for remotePort in range(len(self.tester.ports_info)):
- if self.tester.ports_info[remotePort]["type"].lower() == "trex":
+ for remotePort in range(len(self.tg_node.ports_info)):
+ if self.tg_node.ports_info[remotePort]["type"].lower() == "trex":
if (
- self.tester.ports_info[remotePort]["intf"].lower()
+ self.tg_node.ports_info[remotePort]["intf"].lower()
== peer.lower()
- or self.tester.ports_info[remotePort]["pci"].lower()
+ or self.tg_node.ports_info[remotePort]["pci"].lower()
== peer.lower()
):
hits[remotePort] = True
- self.ports_map[dutPort] = remotePort
+ self.ports_map[sutPort] = remotePort
break
elif (
- self.tester.ports_info[remotePort]["pci"].lower()
+ self.tg_node.ports_info[remotePort]["pci"].lower()
== peer.lower()
):
hits[remotePort] = True
- self.ports_map[dutPort] = remotePort
+ self.ports_map[sutPort] = remotePort
break
- if self.ports_map[dutPort] == -1:
- self.logger.error("CONFIGURED TESTER PORT CANNOT BE FOUND!!!")
+ if self.ports_map[sutPort] == -1:
+ self.logger.error("CONFIGURED TG PORT CANNOT BE FOUND!!!")
else:
continue # skip ping6 map
- for remotePort in range(len(self.tester.ports_info)):
+ for remotePort in range(len(self.tg_node.ports_info)):
if hits[remotePort]:
continue
# skip ping self port
- remotepci = self.tester.ports_info[remotePort]["pci"]
- if (self.crb["IP"] == self.crb["tester IP"]) and (dutpci == remotepci):
+ remotepci = self.tg_node.ports_info[remotePort]["pci"]
+ if (self.node["IP"] == self.node["tg IP"]) and (sutpci == remotepci):
continue
# skip ping those not connected port
- ipv6 = self.get_ipv6_address(dutPort)
+ ipv6 = self.get_ipv6_address(sutPort)
if ipv6 == "Not connected":
- if "ipv4" in self.tester.ports_info[remotePort]:
- out = self.tester.send_ping(
- dutPort,
- self.tester.ports_info[remotePort]["ipv4"],
- self.get_mac_address(dutPort),
+ if "ipv4" in self.tg_node.ports_info[remotePort]:
+ out = self.tg_node.send_ping(
+ sutPort,
+ self.tg_node.ports_info[remotePort]["ipv4"],
+ self.get_mac_address(sutPort),
)
else:
continue
else:
if getattr(self, "send_ping6", None):
out = self.send_ping6(
- dutPort,
- self.tester.ports_info[remotePort]["ipv6"],
- self.get_mac_address(dutPort),
+ sutPort,
+ self.tg_node.ports_info[remotePort]["ipv6"],
+ self.get_mac_address(sutPort),
)
else:
- out = self.tester.send_ping6(
- remotePort, ipv6, self.get_mac_address(dutPort)
+ out = self.tg_node.send_ping6(
+ remotePort, ipv6, self.get_mac_address(sutPort)
)
if out and "64 bytes from" in out:
self.logger.info(
- "PORT MAP: [dut %d: tester %d]" % (dutPort, remotePort)
+ "PORT MAP: [SUT %d: tg %d]" % (sutPort, remotePort)
)
- self.ports_map[dutPort] = remotePort
+ self.ports_map[sutPort] = remotePort
hits[remotePort] = True
- if self.crb["IP"] == self.crb["tester IP"]:
- # remove dut port act as tester port
+ if self.node["IP"] == self.node["tg IP"]:
+ # remove SUT ports acting as TG ports
remove_port = self.get_port_info(remotepci)
if remove_port is not None:
remove.append(remove_port)
- # skip ping from those port already act as dut port
- testerPort = self.tester.get_local_index(dutpci)
- if testerPort != -1:
- hits[testerPort] = True
+ # skip ping from those ports that already act as SUT ports
+ tgPort = self.tg_node.get_local_index(sutpci)
+ if tgPort != -1:
+ hits[tgPort] = True
break
for port in remove:
self.ports_info.remove(port)
- def disable_tester_ipv6(self):
- for tester_port in self.ports_map:
- if self.tester.ports_info[tester_port]["type"].lower() not in (
+ def disable_tg_ipv6(self):
+ for tg_port in self.ports_map:
+ if self.tg_node.ports_info[tg_port]["type"].lower() not in (
"ixia",
"trex",
):
- port = self.tester.ports_info[tester_port]["port"]
+ port = self.tg_node.ports_info[tg_port]["port"]
port.disable_ipv6()
- def enable_tester_ipv6(self):
- for tester_port in range(len(self.tester.ports_info)):
- if self.tester.ports_info[tester_port]["type"].lower() not in (
+ def enable_tg_ipv6(self):
+ for tg_port in range(len(self.tg_node.ports_info)):
+ if self.tg_node.ports_info[tg_port]["type"].lower() not in (
"ixia",
"trex",
):
- port = self.tester.ports_info[tester_port]["port"]
+ port = self.tg_node.ports_info[tg_port]["port"]
port.enable_ipv6()
def check_port_occupied(self, port):
@@ -1339,7 +1336,7 @@ class Dut(Crb):
def close(self):
"""
- Close ssh session of DUT.
+ Close ssh session of SUT.
"""
if self.session:
self.session.close()
@@ -1360,11 +1357,11 @@ class Dut(Crb):
time.sleep(3)
self.virt_pids = []
- def crb_exit(self):
+ def node_exit(self):
"""
- Recover all resource before crb exit
+ Recover all resources before node exit
"""
- self.enable_tester_ipv6()
+ self.enable_tg_ipv6()
self.close()
self.logger.logger_exit()
@@ -1372,7 +1369,7 @@ class Dut(Crb):
class _EalParameter(object):
def __init__(
self,
- dut: Dut,
+ sut_node: SutNode,
fixed_prefix: bool,
socket: int,
cores: Union[str, List[int], List[str]],
@@ -1386,7 +1383,7 @@ class _EalParameter(object):
):
"""
generate eal parameters character string;
- :param dut: dut device;
+ :param sut_node: SUT Node;
:param fixed_prefix: use fixed file-prefix or not, when it is true,
the file-prefix will not be added a timestamp
:param socket: the physical CPU socket index, -1 means no care cpu socket;
@@ -1414,10 +1411,10 @@ class _EalParameter(object):
param other_eal_param: user defined DPDK eal parameters, eg:
other_eal_param='--single-file-segments';
"""
- self.os_type = dut.get_os_type()
+ self.os_type = sut_node.get_os_type()
self.fixed_prefix = fixed_prefix
self.socket = socket
- self.dut = dut
+ self.sut_node = sut_node
self.cores = self._validate_cores(cores)
self.ports = self._validate_ports(ports)
self.port_options: Dict = self._validate_port_options(port_options)
@@ -1489,7 +1486,7 @@ class _EalParameter(object):
)
if is_use_default_cores:
default_cores = "1S/2C/1T"
- core_list = self.dut.get_core_list(default_cores)
+ core_list = self.sut_node.get_core_list(default_cores)
else:
core_list = self._get_cores()
@@ -1524,7 +1521,7 @@ class _EalParameter(object):
def _make_memory_channels(self) -> str:
param_template = "-n {}"
- return param_template.format(self.dut.get_memory_channels())
+ return param_template.format(self.sut_node.get_memory_channels())
def _make_ports_param(self) -> str:
no_port_config = (
@@ -1543,10 +1540,10 @@ class _EalParameter(object):
def _make_default_ports_param(self) -> str:
pci_list = []
allow_option = self._make_allow_option()
- if len(self.dut.ports_info) != 0:
- for port_info in self.dut.ports_info:
+ if len(self.sut_node.ports_info) != 0:
+ for port_info in self.sut_node.ports_info:
pci_list.append("%s %s" % (allow_option, port_info["pci"]))
- self.dut.logger.info(pci_list)
+ self.sut_node.logger.info(pci_list)
return " ".join(pci_list)
def _make_b_ports_param(self) -> str:
@@ -1554,7 +1551,7 @@ class _EalParameter(object):
if len(self.b_ports) != 0:
for port in self.b_ports:
if type(port) == int:
- b_pci_list.append("-b %s" % self.dut.ports_info[port]["pci"])
+ b_pci_list.append("-b %s" % self.sut_node.ports_info[port]["pci"])
else:
b_pci_list = ["-b %s" % pci for pci in self.b_ports]
return " ".join(b_pci_list)
@@ -1567,11 +1564,11 @@ class _EalParameter(object):
def _make_prefix_param(self) -> str:
if self.prefix == "":
- fixed_file_prefix = "dpdk" + "_" + self.dut.prefix_subfix
+ fixed_file_prefix = "dpdk" + "_" + self.sut_node.prefix_subfix
else:
fixed_file_prefix = self.prefix
if not self.fixed_prefix:
- fixed_file_prefix = fixed_file_prefix + "_" + self.dut.prefix_subfix
+ fixed_file_prefix = fixed_file_prefix + "_" + self.sut_node.prefix_subfix
fixed_file_prefix = self._do_os_handle_with_prefix_param(fixed_file_prefix)
return fixed_file_prefix
@@ -1585,7 +1582,7 @@ class _EalParameter(object):
def _make_share_library_path_param(self) -> str:
use_shared_lib = settings.load_global_setting(settings.HOST_SHARED_LIB_SETTING)
shared_lib_path = settings.load_global_setting(settings.HOST_SHARED_LIB_PATH)
- if use_shared_lib == "true" and shared_lib_path and "Virt" not in str(self.dut):
+ if use_shared_lib == "true" and shared_lib_path and "Virt" not in str(self.sut_node):
return " -d {} ".format(shared_lib_path)
return ""
@@ -1611,7 +1608,7 @@ class _EalParameter(object):
if type(self.cores) == list:
return self.cores
elif isinstance(self.cores, str):
- return self.dut.get_core_list(self.cores, socket=self.socket)
+ return self.sut_node.get_core_list(self.cores, socket=self.socket)
def _get_ports_and_wraped_port_with_port_options(self) -> str:
w_pci_list = []
@@ -1621,7 +1618,7 @@ class _EalParameter(object):
def _add_port_options_to(self, port: Union[str, int]) -> str:
allow_option = self._make_allow_option()
- port_mac_addr = self.dut.ports_info[port]["pci"] if type(port) == int else port
+ port_mac_addr = self.sut_node.ports_info[port]["pci"] if type(port) == int else port
port_param = f"{allow_option} {port_mac_addr}"
port_option = self._get_port_options_from_config(port)
if port_option:
@@ -1636,16 +1633,16 @@ class _EalParameter(object):
def _make_allow_option(self) -> str:
is_new_dpdk_version = (
- self.dut.dpdk_version > "20.11.0-rc3" or self.dut.dpdk_version == "20.11.0"
+ self.sut_node.dpdk_version > "20.11.0-rc3" or self.sut_node.dpdk_version == "20.11.0"
)
return "-a" if is_new_dpdk_version else "-w"
def _do_os_handle_with_prefix_param(self, file_prefix: str) -> str:
- if self.dut.get_os_type() == "freebsd":
- self.dut.prefix_list = []
+ if self.sut_node.get_os_type() == "freebsd":
+ self.sut_node.prefix_list = []
file_prefix = ""
else:
- self.dut.prefix_list.append(file_prefix)
+ self.sut_node.prefix_list.append(file_prefix)
file_prefix = "--file-prefix=" + file_prefix
return file_prefix
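For reference, a hedged sketch of the renamed _EalParameter call (dut= becomes sut_node=), mirroring the call site updated earlier in this file; sut_node and config are placeholders:

    # Illustrative; config must carry cores/ports/prefix/... exactly as before the rename.
    eal_parameter_creator = _EalParameter(
        sut_node=sut_node,    # a SutNode instance (placeholder variable)
        fixed_prefix=False,
        socket=-1,            # -1 means no CPU-socket preference
        **config,
    )
    eal_str = eal_parameter_creator.make_eal_param()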
diff --git a/framework/test_case.py b/framework/test_case.py
index 2831cb36..360befb3 100644
--- a/framework/test_case.py
+++ b/framework/test_case.py
@@ -37,11 +37,11 @@ from .utils import BLUE, RED
class TestCase(object):
- def __init__(self, duts, tester, target, suitename):
+ def __init__(self, sut_nodes, tg_node, target, suitename):
self.suite_name = suitename
- self.dut = duts[0]
- self.duts = duts
- self.tester = tester
+ self.sut_node = sut_nodes[0]
+ self.sut_nodes = sut_nodes
+ self.tg_node = tg_node
self.target = target
# local variable
@@ -49,19 +49,19 @@ class TestCase(object):
self._subtitle = None
# check session and reconnect if possible
- for dutobj in self.duts:
- self._check_and_reconnect(crb=dutobj)
- self._check_and_reconnect(crb=self.tester)
+ for sut_node in self.sut_nodes:
+ self._check_and_reconnect(node=sut_node)
+ self._check_and_reconnect(node=self.tg_node)
# convert netdevice to codename
- self.nic = self.dut.nic.name
- self.nic_obj = self.dut.nic
- self.kdriver = self.dut.nic.default_driver
- self.pkg = self.dut.nic.pkg
+ self.nic = self.sut_node.nic.name
+ self.nic_obj = self.sut_node.nic
+ self.kdriver = self.sut_node.nic.default_driver
+ self.pkg = self.sut_node.nic.pkg
# result object for save suite result
self._suite_result = Result()
- self._suite_result.dut = self.dut.crb["IP"]
+ self._suite_result.sut = self.sut_node.node["IP"]
self._suite_result.target = target
self._suite_result.nic = self.nic
self._suite_result.test_suite = self.suite_name
@@ -110,25 +110,25 @@ class TestCase(object):
self.logger = getLogger(class_name)
self.logger.config_suite(class_name)
- def _check_and_reconnect(self, crb=None):
+ def _check_and_reconnect(self, node=None):
try:
- result = crb.session.check_available()
+ result = node.session.check_available()
except:
result = False
if result is False:
- crb.reconnect_session()
- if "dut" in str(type(crb)):
- crb.send_expect("cd %s" % crb.base_dir, "#")
- crb.set_env_variable()
+ node.reconnect_session()
+ if "sut" in str(type(node)):
+ node.send_expect("cd %s" % node.base_dir, "#")
+ node.set_env_variable()
try:
- result = crb.alt_session.check_available()
+ result = node.alt_session.check_available()
except:
result = False
if result is False:
- crb.reconnect_session(alt_session=True)
+ node.reconnect_session(alt_session=True)
def set_up_all(self):
pass
@@ -238,9 +238,9 @@ class TestCase(object):
Execute suite setup_all function before cases.
"""
# clear all previous output
- for dutobj in self.duts:
- dutobj.get_session_output(timeout=0.1)
- self.tester.get_session_output(timeout=0.1)
+ for sut_node in self.sut_nodes:
+ sut_node.get_session_output(timeout=0.1)
+ self.tg_node.get_session_output(timeout=0.1)
# save into setup history list
self.enable_history(self.setup_history)
@@ -316,9 +316,9 @@ class TestCase(object):
self.running_case = case_name
# clean session
- for dutobj in self.duts:
- dutobj.get_session_output(timeout=0.1)
- self.tester.get_session_output(timeout=0.1)
+ for sut_node in self.sut_nodes:
+ sut_node.get_session_output(timeout=0.1)
+ self.tg_node.get_session_output(timeout=0.1)
# run set_up function for each case
self.set_up()
# run test case
@@ -379,13 +379,13 @@ class TestCase(object):
if load_global_setting(FUNC_SETTING) == "yes":
for case_obj in self._get_functional_cases():
- for i in range(self.tester.re_run_time + 1):
+ for i in range(self.tg_node.re_run_time + 1):
ret = self.execute_test_case(case_obj)
- if ret is False and self.tester.re_run_time:
- for dutobj in self.duts:
- dutobj.get_session_output(timeout=0.5 * (i + 1))
- self.tester.get_session_output(timeout=0.5 * (i + 1))
+ if ret is False and self.tg_node.re_run_time:
+ for sut_node in self.sut_nodes:
+ sut_node.get_session_output(timeout=0.5 * (i + 1))
+ self.tg_node.get_session_output(timeout=0.5 * (i + 1))
time.sleep(i + 1)
self.logger.info(
" Test case %s failed and re-run %d time"
@@ -449,14 +449,14 @@ class TestCase(object):
except Exception:
self.logger.error("tear_down_all failed:\n" + traceback.format_exc())
- for dutobj in self.duts:
- dutobj.kill_all()
- self.tester.kill_all()
+ for sut_node in self.sut_nodes:
+ sut_node.kill_all()
+ self.tg_node.kill_all()
- for dutobj in self.duts:
- dutobj.virt_exit()
+ for sut_node in self.sut_nodes:
+ sut_node.virt_exit()
# destroy all vfs
- dutobj.destroy_all_sriov_vfs()
+ sut_node.destroy_all_sriov_vfs()
def execute_tear_down(self):
"""
@@ -473,12 +473,12 @@ class TestCase(object):
def enable_history(self, history):
"""
- Enable history for all CRB's default session
+ Enable history for all nodes' default sessions
"""
- for dutobj in self.duts:
- dutobj.session.set_history(history)
+ for sut_node in self.sut_nodes:
+ sut_node.session.set_history(history)
- self.tester.session.set_history(history)
+ self.tg_node.session.set_history(history)
def dump_history(self):
"""
@@ -495,8 +495,8 @@ class TestCase(object):
"""
bitrate = 1000.0 # 1Gb ('.0' forces to operate as float)
if self.nic == "any" or self.nic == "cfg":
- driver = self._get_nic_driver(self.dut.ports_info[0]["type"])
- nic = get_nic_name(self.dut.ports_info[0]["type"])
+ driver = self._get_nic_driver(self.sut_node.ports_info[0]["type"])
+ nic = get_nic_name(self.sut_node.ports_info[0]["type"])
else:
driver = self._get_nic_driver(self.nic)
nic = self.nic
@@ -526,7 +526,7 @@ class TestCase(object):
def bind_nic_driver(self, ports, driver=""):
for port in ports:
- netdev = self.dut.ports_info[port]["port"]
+ netdev = self.sut_node.ports_info[port]["port"]
driver_now = netdev.get_nic_driver()
driver_new = driver if driver else netdev.default_driver
if driver_new != driver_now:
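For reference, the attribute mapping inside a suite after this rename, as an illustrative sketch; TestSuiteExample is a placeholder name:

    # formerly self.dut / self.duts / self.tester
    from framework.test_case import TestCase

    class TestSuiteExample(TestCase):      # placeholder suite
        def set_up_all(self):
            assert self.sut_node is self.sut_nodes[0]   # primary SUT is the first node
            assert self.tg_node is not None             # single TG node per run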
diff --git a/framework/test_result.py b/framework/test_result.py
index b62ed120..13c14844 100644
--- a/framework/test_result.py
+++ b/framework/test_result.py
@@ -17,7 +17,7 @@ class Result(object):
This is presented to the user with a property based interface.
internals = [
- 'dut1', [
+ 'sut1', [
'kdriver',
'firmware',
'pkg',
@@ -44,156 +44,156 @@ class Result(object):
"""
def __init__(self):
- self.__dut = 0
+ self.__sut = 0
self.__target = 0
self.__test_suite = 0
self.__test_case = 0
self.__test_result = None
self.__message = None
self.__internals = []
- self.__failed_duts = {}
+ self.__failed_suts = {}
self.__failed_targets = {}
- def __set_dut(self, dut):
- if dut not in self.__internals:
- self.__internals.append(dut)
+ def __set_sut(self, sut):
+ if sut not in self.__internals:
+ self.__internals.append(sut)
self.__internals.append([])
- self.__dut = self.__internals.index(dut)
+ self.__sut = self.__internals.index(sut)
- def __get_dut(self):
- return self.__internals[self.__dut]
+ def __get_sut(self):
+ return self.__internals[self.__sut]
- def current_dpdk_version(self, dut):
+ def current_dpdk_version(self, sut):
"""
- Returns the dpdk version for a given DUT
+ Returns the dpdk version for a given SUT
"""
try:
- dut_idx = self.__internals.index(dut)
- return self.__internals[dut_idx + 1][4]
+ sut_idx = self.__internals.index(sut)
+ return self.__internals[sut_idx + 1][4]
except:
return ""
def __set_dpdk_version(self, dpdk_version):
- if dpdk_version not in self.internals[self.__dut + 1]:
+ if dpdk_version not in self.internals[self.__sut + 1]:
dpdk_current = self.__get_dpdk_version()
if dpdk_current:
if dpdk_version not in dpdk_current:
- self.internals[self.__dut + 1][4] = (
+ self.internals[self.__sut + 1][4] = (
dpdk_current + "/" + dpdk_version
)
else:
- self.internals[self.__dut + 1].append(dpdk_version)
+ self.internals[self.__sut + 1].append(dpdk_version)
def __get_dpdk_version(self):
try:
- return self.internals[self.__dut + 1][4]
+ return self.internals[self.__sut + 1][4]
except:
return ""
- def current_kdriver(self, dut):
+ def current_kdriver(self, sut):
"""
- Returns the driver version for a given DUT
+ Returns the driver version for a given SUT
"""
try:
- dut_idx = self.__internals.index(dut)
- return self.__internals[dut_idx + 1][0]
+ sut_idx = self.__internals.index(sut)
+ return self.__internals[sut_idx + 1][0]
except:
return ""
def __set_kdriver(self, driver):
- if not self.internals[self.__dut + 1]:
+ if not self.internals[self.__sut + 1]:
kdriver_current = self.__get_kdriver()
if kdriver_current:
if driver not in kdriver_current:
- self.internals[self.__dut + 1][0] = kdriver_current + "/" + driver
+ self.internals[self.__sut + 1][0] = kdriver_current + "/" + driver
else:
- self.internals[self.__dut + 1].append(driver)
+ self.internals[self.__sut + 1].append(driver)
def __get_kdriver(self):
try:
- return self.internals[self.__dut + 1][0]
+ return self.internals[self.__sut + 1][0]
except:
return ""
- def current_firmware_version(self, dut):
+ def current_firmware_version(self, sut):
"""
- Returns the firmware version for a given DUT
+ Returns the firmware version for a given SUT
"""
try:
- dut_idx = self.__internals.index(dut)
- return self.__internals[dut_idx + 1][1]
+ sut_idx = self.__internals.index(sut)
+ return self.__internals[sut_idx + 1][1]
except:
return ""
def __set_firmware(self, firmware):
- if firmware not in self.internals[self.__dut + 1]:
+ if firmware not in self.internals[self.__sut + 1]:
firmware_current = self.__get_firmware()
if firmware_current:
if firmware not in firmware_current:
- self.internals[self.__dut + 1][1] = (
+ self.internals[self.__sut + 1][1] = (
firmware_current + "/" + firmware
)
else:
- self.internals[self.__dut + 1].append(firmware)
+ self.internals[self.__sut + 1].append(firmware)
def __get_firmware(self):
try:
- return self.internals[self.__dut + 1][1]
+ return self.internals[self.__sut + 1][1]
except:
return ""
- def current_package_version(self, dut):
+ def current_package_version(self, sut):
"""
- Returns the DDP package version for a given DUT
+ Returns the DDP package version for a given SUT
"""
try:
- dut_idx = self.__internals.index(dut)
- return self.__internals[dut_idx + 1][2]
+ sut_idx = self.__internals.index(sut)
+ return self.__internals[sut_idx + 1][2]
except:
return ""
def __set_ddp_package(self, package):
- if package not in self.internals[self.__dut + 1]:
+ if package not in self.internals[self.__sut + 1]:
pkg_current = self.__get_ddp_package()
if pkg_current != "":
if pkg_current and package not in pkg_current:
- self.internals[self.__dut + 1][2] = pkg_current + "/" + package
+ self.internals[self.__sut + 1][2] = pkg_current + "/" + package
else:
- self.internals[self.__dut + 1].append(package)
+ self.internals[self.__sut + 1].append(package)
def __get_ddp_package(self):
try:
- return self.internals[self.__dut + 1][2]
+ return self.internals[self.__sut + 1][2]
except:
return ""
- def current_driver(self, dut):
+ def current_driver(self, sut):
"""
- Returns the DDP package version for a given DUT
+ Returns the driver version for a given SUT
"""
try:
- dut_idx = self.__internals.index(dut)
- return self.__internals[dut_idx + 1][3]
+ sut_idx = self.__internals.index(sut)
+ return self.__internals[sut_idx + 1][3]
except:
return ""
def __set_driver(self, driver):
- if driver not in self.internals[self.__dut + 1]:
+ if driver not in self.internals[self.__sut + 1]:
driver_current = self.__get_driver()
if driver_current:
if driver not in driver_current:
- self.internals[self.__dut + 1][3] = driver_current + "/" + driver
+ self.internals[self.__sut + 1][3] = driver_current + "/" + driver
else:
- self.internals[self.__dut + 1].append(driver)
+ self.internals[self.__sut + 1].append(driver)
def __get_driver(self):
try:
- return self.internals[self.__dut + 1][3]
+ return self.internals[self.__sut + 1][3]
except:
return ""
def __current_targets(self):
- return self.internals[self.__dut + 1]
+ return self.internals[self.__sut + 1]
def __set_target(self, target):
targets = self.__current_targets()
@@ -285,130 +285,130 @@ class Result(object):
"""
self.__set_test_case_result(result="BLOCKED", message=message)
- def all_duts(self):
+ def all_suts(self):
"""
- Returns all the DUTs it's aware of.
+ Returns all the SUTs it's aware of.
"""
return self.__internals[::2]
- def all_targets(self, dut):
+ def all_targets(self, sut):
"""
- Returns the targets for a given DUT
+ Returns the targets for a given SUT
"""
try:
- dut_idx = self.__internals.index(dut)
+ sut_idx = self.__internals.index(sut)
except:
return None
- return self.__internals[dut_idx + 1][5::3]
+ return self.__internals[sut_idx + 1][5::3]
- def current_nic(self, dut, target):
+ def current_nic(self, sut, target):
"""
- Returns the NIC for a given DUT and target
+ Returns the NIC for a given SUT and target
"""
try:
- dut_idx = self.__internals.index(dut)
- target_idx = self.__internals[dut_idx + 1].index(target)
+ sut_idx = self.__internals.index(sut)
+ target_idx = self.__internals[sut_idx + 1].index(target)
except:
return None
- return self.__internals[dut_idx + 1][target_idx + 1]
+ return self.__internals[sut_idx + 1][target_idx + 1]
- def all_test_suites(self, dut, target):
+ def all_test_suites(self, sut, target):
"""
- Returns all the test suites for a given DUT and target.
+ Returns all the test suites for a given SUT and target.
"""
try:
- dut_idx = self.__internals.index(dut)
- target_idx = self.__internals[dut_idx + 1].index(target)
+ sut_idx = self.__internals.index(sut)
+ target_idx = self.__internals[sut_idx + 1].index(target)
except:
return None
- return self.__internals[dut_idx + 1][target_idx + 2][::2]
+ return self.__internals[sut_idx + 1][target_idx + 2][::2]
- def all_test_cases(self, dut, target, suite):
+ def all_test_cases(self, sut, target, suite):
"""
- Returns all the test cases for a given DUT, target and test case.
+ Returns all the test cases for a given SUT, target and test case.
"""
try:
- dut_idx = self.__internals.index(dut)
- target_idx = self.__internals[dut_idx + 1].index(target)
- suite_idx = self.__internals[dut_idx + 1][target_idx + 2].index(suite)
+ sut_idx = self.__internals.index(sut)
+ target_idx = self.__internals[sut_idx + 1].index(target)
+ suite_idx = self.__internals[sut_idx + 1][target_idx + 2].index(suite)
except:
return None
- return self.__internals[dut_idx + 1][target_idx + 2][suite_idx + 1][::2]
+ return self.__internals[sut_idx + 1][target_idx + 2][suite_idx + 1][::2]
- def result_for(self, dut, target, suite, case):
+ def result_for(self, sut, target, suite, case):
"""
- Returns the test case result/message for a given DUT, target, test
+ Returns the test case result/message for a given SUT, target, test
suite and test case.
"""
try:
- dut_idx = self.__internals.index(dut)
- target_idx = self.__internals[dut_idx + 1].index(target)
- suite_idx = self.__internals[dut_idx + 1][target_idx + 2].index(suite)
- case_idx = self.__internals[dut_idx + 1][target_idx + 2][
+ sut_idx = self.__internals.index(sut)
+ target_idx = self.__internals[sut_idx + 1].index(target)
+ suite_idx = self.__internals[sut_idx + 1][target_idx + 2].index(suite)
+ case_idx = self.__internals[sut_idx + 1][target_idx + 2][
suite_idx + 1
].index(case)
except:
return None
- return self.__internals[dut_idx + 1][target_idx + 2][suite_idx + 1][
+ return self.__internals[sut_idx + 1][target_idx + 2][suite_idx + 1][
case_idx + 1
]
- def add_failed_dut(self, dut, msg):
+ def add_failed_sut(self, sut, msg):
"""
- Sets the given DUT as failing due to msg
+ Sets the given SUT as failing due to msg
"""
- self.__failed_duts[dut] = msg
+ self.__failed_suts[sut] = msg
- def remove_failed_dut(self, dut):
+ def remove_failed_sut(self, sut):
"""
- Remove the given DUT from failed duts collection
+ Remove the given SUT from failed SUTs collection
"""
- if dut in self.__failed_duts:
- self.__failed_duts.pop(dut)
+ if sut in self.__failed_suts:
+ self.__failed_suts.pop(sut)
- def is_dut_failed(self, dut):
+ def is_sut_failed(self, sut):
"""
- True if the given DUT was marked as failing
+ True if the given SUT was marked as failing
"""
- return dut in self.__failed_duts
+ return sut in self.__failed_suts
- def dut_failed_msg(self, dut):
+ def sut_failed_msg(self, sut):
"""
- Returns the reason of failure for a given DUT
+ Returns the reason of failure for a given SUT
"""
- return self.__failed_duts[dut]
+ return self.__failed_suts[sut]
- def add_failed_target(self, dut, target, msg):
+ def add_failed_target(self, sut, target, msg):
"""
- Sets the given DUT, target as failing due to msg
+ Sets the given SUT, target as failing due to msg
"""
- self.__failed_targets[dut + target] = msg
+ self.__failed_targets[sut + target] = msg
- def remove_failed_target(self, dut, target):
+ def remove_failed_target(self, sut, target):
"""
- Remove the given DUT, target from failed targets collection
+ Remove the given SUT, target from failed targets collection
"""
- key_word = dut + target
+ key_word = sut + target
if key_word in self.__failed_targets:
self.__failed_targets.pop(key_word)
- def is_target_failed(self, dut, target):
+ def is_target_failed(self, sut, target):
"""
- True if the given DUT,target were marked as failing
+ True if the given SUT,target were marked as failing
"""
- return (dut + target) in self.__failed_targets
+ return (sut + target) in self.__failed_targets
- def target_failed_msg(self, dut, target):
+ def target_failed_msg(self, sut, target):
"""
- Returns the reason of failure for a given DUT,target
+ Returns the reason of failure for a given SUT,target
"""
- return self.__failed_targets[dut + target]
+ return self.__failed_targets[sut + target]
"""
Attributes defined as properties to hide the implementation from the
presented interface.
"""
- dut = property(__get_dut, __set_dut)
+ sut = property(__get_sut, __set_sut)
dpdk_version = property(__get_dpdk_version, __set_dpdk_version)
kdriver = property(__get_kdriver, __set_kdriver)
driver = property(__get_driver, __set_driver)
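For illustration, a minimal sketch of how the renamed result accessors fit together; the `result`, `sut` and `target` objects below are placeholders and not part of this patch:

    def dump_target_results(result, sut, target):
        # A SUT marked as failed short-circuits the whole lookup tree.
        if result.is_sut_failed(sut):
            print("%s failed: %s" % (sut, result.sut_failed_msg(sut)))
            return
        # The accessors return None for unknown keys, hence the "or []".
        for suite in result.all_test_suites(sut, target) or []:
            for case in result.all_test_cases(sut, target, suite) or []:
                print(suite, case, result.result_for(sut, target, suite, case))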
diff --git a/framework/pktgen_ixia.py b/framework/tg_ixexplorer.py
similarity index 95%
rename from framework/pktgen_ixia.py
rename to framework/tg_ixexplorer.py
index b51deb98..23eefd54 100644
--- a/framework/pktgen_ixia.py
+++ b/framework/tg_ixexplorer.py
@@ -4,41 +4,34 @@
import os
import re
-import string
import time
from pprint import pformat
from scapy.packet import Packet
from scapy.utils import wrpcap
-from .pktgen_base import (
- PKTGEN_IXIA,
- TRANSMIT_CONT,
- TRANSMIT_M_BURST,
- TRANSMIT_S_BURST,
- PacketGenerator,
-)
-from .settings import SCAPY2IXIA
+from .settings import SCAPY2IXIA, TG_IXEXPLORER, TRANSMIT_CONT, TRANSMIT_S_BURST
from .ssh_connection import SSHConnection
-from .utils import convert_int2ip, convert_ip2int, convert_mac2long, convert_mac2str
+from .tg_perf import TrafficGenerator
+from .utils import convert_ip2int, convert_mac2long
-class Ixia(SSHConnection):
+class IxExplorer(SSHConnection):
"""
- IXIA performance measurement class.
+ IxExplorer performance measurement class.
"""
- def __init__(self, tester, ixiaPorts, logger):
- self.tester = tester
- self.NAME = PKTGEN_IXIA
- super(Ixia, self).__init__(
+ def __init__(self, tg_node, ixiaPorts, logger):
+ self.tg_node = tg_node
+ self.NAME = TG_IXEXPLORER
+ super(IxExplorer, self).__init__(
self.get_ip_address(),
self.NAME,
- self.tester.get_username(),
+ self.tg_node.get_username(),
self.get_password(),
)
self.logger = logger
- super(Ixia, self).init_log(self.logger)
+ super(IxExplorer, self).init_log(self.logger)
self.tcl_cmds = []
self.chasId = None
@@ -82,10 +75,10 @@ class Ixia(SSHConnection):
return self.send_expect("stat getLineSpeed %s" % ixia_port, "%")
def get_ip_address(self):
- return self.tester.get_ip_address()
+ return self.tg_node.get_ip_address()
def get_password(self):
- return self.tester.get_password()
+ return self.tg_node.get_password()
def add_tcl_cmd(self, cmd):
"""
@@ -107,7 +100,7 @@ class Ixia(SSHConnection):
self.close()
def parse_pcap(self, fpcap):
- # save Packet instance to pcap file
+ # save ScapyTrafficGenerator instance to pcap file
if isinstance(fpcap, Packet):
pcap_path = "/root/temp.pcap"
if os.path.exists(pcap_path):
@@ -550,7 +543,7 @@ class Ixia(SSHConnection):
method = getattr(self, method_name.lower())
method(ixia_port, vm.get("fields_config", {}), **params)
self.add_tcl_cmd("stream set %s %d" % (ixia_port, stream_id))
- # only use one packet format in pktgen
+ # only use one packet format in traffic generator
break
# set commands at last stream
@@ -922,7 +915,7 @@ class Ixia(SSHConnection):
Run commands in command list.
"""
fileContent = "\n".join(self.tcl_cmds) + "\n"
- self.tester.create_file(fileContent, "ixiaConfig.tcl")
+ self.tg_node.create_file(fileContent, "ixiaConfig.tcl")
self.send_expect("source ixiaConfig.tcl", "% ", 75)
def configure_transmission(self, option=None):
@@ -1088,16 +1081,16 @@ class Ixia(SSHConnection):
"configStream %s %s %s %s" % (stream, rate, prio, types), "% ", 100
)
- def get_connection_relation(self, dutPorts):
+ def get_connection_relation(self, sutPorts):
"""
- Get the connect relations between DUT and Ixia.
+ Get the connect relations between SUT and Ixia.
"""
- for port in dutPorts:
- info = self.tester.get_pci(self.tester.get_local_port(port)).split(".")
+ for port in sutPorts:
+ info = self.tg_node.get_pci(self.tg_node.get_local_port(port)).split(".")
self.conRelation[port] = [
int(info[0]),
int(info[1]),
- repr(self.tester.dut.get_mac_address(port).replace(":", " ").upper()),
+ repr(self.tg_node.sut_node.get_mac_address(port).replace(":", " ").upper()),
]
return self.conRelation
@@ -1144,7 +1137,7 @@ class Ixia(SSHConnection):
"""
if self.isalive():
self.send_expect("exit", "# ")
- super(Ixia, self).close()
+ super(IxExplorer, self).close()
def stat_get_stat_all_stats(self, port_number):
"""
@@ -1323,7 +1316,7 @@ class Ixia(SSHConnection):
return rxPackets
# ---------------------------------------------------------
- # extend methods for pktgen subclass `IxiaPacketGenerator
+ # extend methods for traffic generator subclass IxiaTrafficGenerator
# ---------------------------------------------------------
def disconnect(self):
"""quit from ixia server"""
@@ -1447,19 +1440,19 @@ class Ixia(SSHConnection):
return stats
-class IxiaPacketGenerator(PacketGenerator):
+class IxExplorerTrafficGenerator(TrafficGenerator):
"""
- Ixia packet generator
+ Ixia traffic generator
"""
- def __init__(self, tester):
- super(IxiaPacketGenerator, self).__init__(tester)
+ def __init__(self, tg_node):
+ super(IxExplorerTrafficGenerator, self).__init__(tg_node)
# ixia management
- self.pktgen_type = PKTGEN_IXIA
+ self.tg_type = TG_IXEXPLORER
self._conn = None
# ixia configuration information of dts
conf_inst = self._get_generator_conf_instance()
- self.conf = conf_inst.load_pktgen_config()
+ self.conf = conf_inst.load_tg_config()
# ixia port configuration
self._traffic_opt = {}
self._traffic_ports = []
@@ -1484,23 +1477,23 @@ class IxiaPacketGenerator(PacketGenerator):
"count",
]
- self.tester = tester
+ self.tg_node = tg_node
def get_ports(self):
- """only used for ixia packet generator"""
+ """only used for ixia traffic generator"""
return self._conn.get_ports()
def _prepare_generator(self):
"""start ixia server"""
try:
- self._connect(self.tester, self.conf)
+ self._connect(self.tg_node, self.conf)
except Exception as e:
msg = "failed to connect to ixia server"
raise Exception(msg)
- def _connect(self, tester, conf):
+ def _connect(self, tg_node, conf):
# initialize ixia class
- self._conn = Ixia(tester, conf, self.logger)
+ self._conn = IxExplorer(tg_node, conf, self.logger)
for p in self._conn.get_ports():
self._ports.append(p)
@@ -1522,8 +1515,8 @@ class IxiaPacketGenerator(PacketGenerator):
"""
get ixia port pci address
"""
- for pktgen_port_id, info in enumerate(self._ports):
- if pktgen_port_id == port_id:
+ for tg_port_id, info in enumerate(self._ports):
+ if tg_port_id == port_id:
_pci = info.get("pci")
return _pci
else:
@@ -1531,18 +1524,18 @@ class IxiaPacketGenerator(PacketGenerator):
def _get_gen_port(self, pci):
"""
- get port management id of the packet generator
+ get port management id of the traffic generator
"""
- for pktgen_port_id, info in enumerate(self._ports):
+ for tg_port_id, info in enumerate(self._ports):
_pci = info.get("pci")
if _pci == pci:
- return pktgen_port_id
+ return tg_port_id
else:
return -1
def _is_gen_port(self, pci):
"""
- check if a pci address is managed by the packet generator
+ check if a pci address is managed by the traffic generator
"""
for name, _port_obj in self._conn.ports.items():
_pci = _port_obj.info["pci_addr"]
@@ -1563,7 +1556,8 @@ class IxiaPacketGenerator(PacketGenerator):
@property
def _vm_conf(self):
- # close it and wait for more discussion about pktgen framework
+ # close it and wait for more discussion about traffic generator
+ # framework
return None
conf = {}
# get the subnet range of src and dst ip
@@ -1584,13 +1578,13 @@ class IxiaPacketGenerator(PacketGenerator):
return conf if conf else None
def _clear_streams(self):
- """clear streams in `PacketGenerator`"""
+ """clear streams in `TrafficGenerator`"""
# if streams has been attached, remove them from trex server.
self._remove_all_streams()
def _remove_all_streams(self):
"""
- remove all stream deployed on the packet generator
+ remove all stream deployed on the traffic generator
"""
if not self.get_streams():
return
@@ -1623,7 +1617,7 @@ class IxiaPacketGenerator(PacketGenerator):
self._conn.config_port_flow_control(rx_ports, flow_ctrl_opt)
def _throughput_stats(self, stream, stats):
- """convert ixia throughput statistics format to dts PacketGenerator format"""
+ """convert ixia throughput statistics format to dts TrafficGenerator format"""
# tx packet
tx_port_id = stream["tx_port"]
port_stats = stats.get(tx_port_id)
@@ -1657,7 +1651,7 @@ class IxiaPacketGenerator(PacketGenerator):
return rx_bps, rx_pps
def _loss_rate_stats(self, stream, stats):
- """convert ixia loss rate statistics format to dts PacketGenerator format"""
+ """convert ixia loss rate statistics format to dts TrafficGenerator format"""
# tx packet
port_id = stream.get("tx_port")
if port_id in list(stats.keys()):
@@ -1679,7 +1673,7 @@ class IxiaPacketGenerator(PacketGenerator):
return opackets, ipackets
def _latency_stats(self, stream, stats):
- """convert ixia latency statistics format to dts PacketGenerator format"""
+ """convert ixia latency statistics format to dts TrafficGenerator format"""
port_id = stream.get("tx_port")
if port_id in list(stats.keys()):
port_stats = stats[port_id]
@@ -1702,7 +1696,7 @@ class IxiaPacketGenerator(PacketGenerator):
##########################################################################
#
- # class ``PacketGenerator`` abstract methods should be implemented here
+ # class ``TrafficGenerator`` abstract methods should be implemented here
#
##########################################################################
def _prepare_transmission(self, stream_ids=[], latency=False):
@@ -1727,10 +1721,11 @@ class IxiaPacketGenerator(PacketGenerator):
port_config[tx_port] = []
config = {}
config.update(options)
- # In pktgen, all streams flow control option are the same by design.
+            # In the traffic generator, all streams' flow control options are
+            # the same by design.
self._traffic_opt["flow_control"] = options.get("flow_control") or {}
- # if vm config by pktgen config file, set it here to take the place
- # of setting on suite
+            # if the vm config comes from the traffic generator config file,
+            # set it here to take the place of the setting from the suite
if self._vm_conf: # TBD, remove this process later
config["fields_config"] = self._vm_conf
# get stream rate percent
@@ -1741,7 +1736,7 @@ class IxiaPacketGenerator(PacketGenerator):
port_config[tx_port].append(ixia_option)
if not port_config:
- msg = "no stream options for ixia packet generator"
+ msg = "no stream options for ixia traffic generator"
raise Exception(msg)
# -------------------------------------------------------------------
port_lists = []
@@ -1762,7 +1757,7 @@ class IxiaPacketGenerator(PacketGenerator):
msg = (
"{0} only support set rate percent in streams, "
"current run traffic with stream rate percent"
- ).format(self.pktgen_type)
+ ).format(self.tg_type)
self.logger.warning(msg)
# run ixia server
try:
@@ -1802,7 +1797,7 @@ class IxiaPacketGenerator(PacketGenerator):
def _check_options(self, opts={}):
# remove it to upper level class and wait for more discussion about
- # pktgen framework
+ # traffic generator framework
return True
for key in opts:
if key in self.options_keys:
diff --git a/framework/pktgen_ixia_network.py b/framework/tg_ixnetwork.py
similarity index 83%
rename from framework/pktgen_ixia_network.py
rename to framework/tg_ixnetwork.py
index ab5e71ac..87a1c2c6 100644
--- a/framework/pktgen_ixia_network.py
+++ b/framework/tg_ixnetwork.py
@@ -2,33 +2,31 @@
# Copyright(c) 2010-2021 Intel Corporation
#
-import os
-import time
import traceback
from pprint import pformat
-from .pktgen_base import PKTGEN_IXIA_NETWORK, PacketGenerator
+from .tg_perf import TG_IXNETWORK, TrafficGenerator
-class IxNetworkPacketGenerator(PacketGenerator):
+class IxNetworkTrafficGenerator(TrafficGenerator):
"""
- ixNetwork packet generator
+ ixNetwork traffic generator
"""
- def __init__(self, tester):
- super(IxNetworkPacketGenerator, self).__init__(tester)
- self.pktgen_type = PKTGEN_IXIA_NETWORK
+ def __init__(self, tg_node):
+ super(IxNetworkTrafficGenerator, self).__init__(tg_node)
+ self.tg_type = TG_IXNETWORK
self._conn = None
# ixNetwork configuration information of dts
conf_inst = self._get_generator_conf_instance()
- self.conf = conf_inst.load_pktgen_config()
+ self.conf = conf_inst.load_tg_config()
# ixNetwork port configuration
self._traffic_ports = []
self._ports = []
self._rx_ports = []
def get_ports(self):
- """used for ixNetwork packet generator"""
+ """used for ixNetwork traffic generator"""
return self._conn.get_ports()
def _prepare_generator(self):
@@ -43,7 +41,7 @@ class IxNetworkPacketGenerator(PacketGenerator):
# initialize ixNetwork class
from framework.ixia_network import IxNetwork
- self._conn = IxNetwork(self.pktgen_type, conf, self.logger)
+ self._conn = IxNetwork(self.tg_type, conf, self.logger)
for p in self._conn.get_ports():
self._ports.append(p)
@@ -70,8 +68,8 @@ class IxNetworkPacketGenerator(PacketGenerator):
"""
get ixNetwork port pci address
"""
- for pktgen_port_id, info in enumerate(self._ports):
- if pktgen_port_id == port_id:
+ for tg_port_id, info in enumerate(self._ports):
+ if tg_port_id == port_id:
_pci = info.get("pci")
return _pci
else:
@@ -79,18 +77,18 @@ class IxNetworkPacketGenerator(PacketGenerator):
def _get_gen_port(self, pci):
"""
- get port management id of the packet generator
+ get port management id of the traffic generator
"""
- for pktgen_port_id, info in enumerate(self._ports):
+ for tg_port_id, info in enumerate(self._ports):
_pci = info.get("pci")
if _pci == pci:
- return pktgen_port_id
+ return tg_port_id
else:
return -1
def _is_gen_port(self, pci):
"""
- check if a pci address is managed by the packet generator
+ check if a pci address is managed by the traffic generator
"""
for name, _port_obj in self._conn.ports.items():
_pci = _port_obj.info["pci_addr"]
@@ -114,13 +112,13 @@ class IxNetworkPacketGenerator(PacketGenerator):
return self._conn.send_ping6(pci, mac, ipv6)
def _clear_streams(self):
- """clear streams in `PacketGenerator`"""
+ """clear streams in `TrafficGenerator`"""
# if streams has been attached, remove them from ixNetwork api server.
self._remove_all_streams()
def _remove_all_streams(self):
"""
- remove all stream deployed on the packet generator
+ remove all stream deployed on the traffic generator
"""
if not self.get_streams():
return
@@ -142,7 +140,7 @@ class IxNetworkPacketGenerator(PacketGenerator):
##########################################################################
#
- # class ``PacketGenerator`` abstract methods should be implemented here
+ # class ``TrafficGenerator`` abstract methods should be implemented here
#
##########################################################################
def _prepare_transmission(self, stream_ids=[], latency=False):
@@ -176,7 +174,7 @@ class IxNetworkPacketGenerator(PacketGenerator):
self.rate_percent = rate_percent
if not port_config:
- msg = "no stream options for ixNetwork packet generator"
+ msg = "no stream options for ixNetwork traffic generator"
raise Exception(msg)
port_lists = []
diff --git a/framework/tester.py b/framework/tg_node.py
similarity index 76%
rename from framework/tester.py
rename to framework/tg_node.py
index 7414efea..5dc095c1 100644
--- a/framework/tester.py
+++ b/framework/tg_node.py
@@ -9,77 +9,82 @@ Interface for bulk traffic generators.
import os
import random
import re
-import subprocess
-from multiprocessing import Process
from time import sleep
from nics.net_device import GetNicObj
-from .config import PktgenConf
-from .crb import Crb
from .exception import ParameterInvalidException
-from .packet import (
- Packet,
+from .node import Node
+from .scapy_packet_builder import (
+ ScapyPacketBuilder,
compare_pktload,
get_scapy_module_impcmd,
start_tcpdump,
stop_and_load_tcpdump_packets,
strip_pktload,
)
-from .pktgen import getPacketGenerator
from .settings import (
NICS,
PERF_SETTING,
- PKTGEN,
- PKTGEN_GRP,
+ PERF_TG_CONF_KEY,
+ PERF_TG_TYPES,
+ TG_DPDK,
+ TG_IXEXPLORER,
+ TG_IXNETWORK,
+ TG_TREX,
USERNAME,
load_global_setting,
)
-from .utils import GREEN, check_crb_python_version, convert_int2ip, convert_ip2int
+from .tg_ixexplorer import IxExplorerTrafficGenerator
+from .tg_ixnetwork import IxNetworkTrafficGenerator
+from .tg_perf import DpdkTrafficGenerator
+from .tg_trex import TrexTrafficGenerator
+from .utils import GREEN, check_node_python_version, convert_int2ip, convert_ip2int
-class Tester(Crb):
+class TrafficGeneratorNode(Node):
"""
- Start the DPDK traffic generator on the machine `target`.
- A config file and pcap file must have previously been copied
- to this machine.
+ A class for managing connections to the node running the Traffic generator,
+ providing methods that retrieve the necessary information about the node
+ (such as cpu, memory and NIC details), configure it and configure and
+ manage the Traffic generator.
"""
- PORT_INFO_CACHE_KEY = "tester_port_info"
- CORE_LIST_CACHE_KEY = "tester_core_list"
- NUMBER_CORES_CACHE_KEY = "tester_number_cores"
- PCI_DEV_CACHE_KEY = "tester_pci_dev_info"
+ PORT_INFO_CACHE_KEY = "tg_port_info"
+ CORE_LIST_CACHE_KEY = "tg_core_list"
+ NUMBER_CORES_CACHE_KEY = "tg_number_cores"
+ PCI_DEV_CACHE_KEY = "tg_pci_dev_info"
- def __init__(self, crb, serializer):
- self.NAME = "tester"
+ def __init__(self, node, serializer):
+ self.NAME = "tg"
self.scapy_session = None
- super(Tester, self).__init__(crb, serializer, name=self.NAME)
- # check the python version of tester
- check_crb_python_version(self)
+ super(TrafficGeneratorNode, self).__init__(node, serializer, name=self.NAME)
+ # check the python version of TG
+ check_node_python_version(self)
self.bgProcIsRunning = False
- self.duts = []
+ self.sut_nodes = []
self.inBg = 0
self.scapyCmds = []
self.bgCmds = []
self.bgItf = ""
self.re_run_time = 0
- self.pktgen = None
+ self.perf_tg = None
# prepare for scapy env
self.scapy_sessions_li = list()
self.scapy_session = self.prepare_scapy_env()
self.check_scapy_version()
- self.tmp_file = "/tmp/tester/"
+ self.tmp_file = "/tmp/tg/"
out = self.send_expect("ls -d %s" % self.tmp_file, "# ", verify=True)
if out == 2:
self.send_expect("mkdir -p %s" % self.tmp_file, "# ")
def prepare_scapy_env(self):
session_name = (
- "tester_scapy"
+ "tg_scapy"
if not self.scapy_sessions_li
- else f"tester_scapy_{random.random()}"
+ else f"tg_scapy_{random.random()}"
)
session = self.create_session(session_name)
self.scapy_sessions_li.append(session)
@@ -104,17 +109,17 @@ class Tester(Crb):
require_version = value.group(1)
if cur_version != require_version:
self.logger.warning(
- "The scapy vesrion not meet the requirement on tester,"
+                "The scapy version does not meet the requirement on TG,"
+ "please update your scapy, otherwise maybe some suite will failed"
)
def init_ext_gen(self):
"""
- Initialize tester packet generator object.
+ Initialize TG traffic generator object.
"""
if self.it_uses_external_generator():
- if self.is_pktgen:
- self.pktgen_init()
+ if self.uses_perf_tg:
+ self.perf_tg_init()
return
def set_re_run(self, re_run_time):
@@ -125,37 +130,37 @@ class Tester(Crb):
def get_ip_address(self):
"""
- Get ip address of tester CRB.
+ Get ip address of TG Node.
"""
- return self.crb["tester IP"]
+ return self.node["tg IP"]
def get_username(self):
"""
- Get login username of tester CRB.
+ Get login username of TG Node.
"""
return USERNAME
def get_password(self):
"""
- Get tester login password of tester CRB.
+ Get TG login password of TG Node.
"""
- return self.crb["tester pass"]
+ return self.node["tg pass"]
@property
- def is_pktgen(self):
+ def uses_perf_tg(self):
"""
- Check whether packet generator is configured.
+ Check whether traffic generator is configured.
"""
- if PKTGEN not in self.crb or not self.crb[PKTGEN]:
+ if PERF_TG_CONF_KEY not in self.node or not self.node[PERF_TG_CONF_KEY]:
return False
- if self.crb[PKTGEN].lower() in PKTGEN_GRP:
+ if self.node[PERF_TG_CONF_KEY].lower() in PERF_TG_TYPES:
return True
else:
msg = os.linesep.join(
[
- "Packet generator <{0}> is not supported".format(self.crb[PKTGEN]),
- "Current supports: {0}".format(" | ".join(PKTGEN_GRP)),
+                    "Traffic generator <{0}> is not supported".format(self.node[PERF_TG_CONF_KEY]),
+                    "Currently supported: {0}".format(" | ".join(PERF_TG_TYPES)),
]
)
self.logger.info(msg)
@@ -166,8 +171,9 @@ class Tester(Crb):
Check whether performance test will base on IXIA equipment.
"""
try:
- # if pktgen_group is set, take pktgen config file as first selection
- if self.is_pktgen:
+ # if perf_tg is set, take traffic generator config file
+ # as first selection
+ if self.uses_perf_tg:
return True
except Exception as e:
return False
@@ -183,13 +189,13 @@ class Tester(Crb):
and self.has_external_traffic_generator()
)
- def tester_prerequisites(self):
+ def tg_prerequisites(self):
"""
Prerequest function should be called before execute any test case.
- Will call function to scan all lcore's information which on Tester.
+        Will call a function to scan all lcore information on the TG.
Then call pci scan function to collect nic device information.
Then discovery the network topology and save it into cache file.
- At last setup DUT' environment for validation.
+        Finally, set up the SUT's environment for validation.
"""
self.init_core_list()
self.pci_devices_information()
@@ -200,7 +206,7 @@ class Tester(Crb):
def disable_lldp(self):
"""
- Disable tester ports LLDP.
+ Disable TG ports LLDP.
"""
result = self.send_expect("lldpad -d", "# ")
if result:
@@ -227,27 +233,27 @@ class Tester(Crb):
def get_local_port(self, remotePort):
"""
- Return tester local port connect to specified dut port.
+        Return the TG local port connected to the specified SUT port.
"""
- return self.duts[0].ports_map[remotePort]
+ return self.sut_nodes[0].ports_map[remotePort]
def get_local_port_type(self, remotePort):
"""
- Return tester local port type connect to specified dut port.
+        Return the type of the TG local port connected to the specified SUT port.
"""
return self.ports_info[self.get_local_port(remotePort)]["type"]
- def get_local_port_bydut(self, remotePort, dutIp):
+ def get_local_port_bysut(self, remotePort, sutIp):
"""
- Return tester local port connect to specified port and specified dut.
+        Return the TG local port connected to the specified port of the specified SUT.
"""
- for dut in self.duts:
- if dut.crb["My IP"] == dutIp:
- return dut.ports_map[remotePort]
+ for sut in self.sut_nodes:
+ if sut.node["My IP"] == sutIp:
+ return sut.ports_map[remotePort]
def get_local_index(self, pci):
"""
- Return tester local port index by pci id
+ Return TG local port index by pci id
"""
index = -1
for port in self.ports_info:
@@ -258,7 +264,7 @@ class Tester(Crb):
def get_pci(self, localPort):
"""
- Return tester local port pci id.
+ Return TG local port pci id.
"""
if localPort == -1:
raise ParameterInvalidException("local port should not be -1")
@@ -267,7 +273,7 @@ class Tester(Crb):
def get_interface(self, localPort):
"""
- Return tester local port interface name.
+ Return TG local port interface name.
"""
if localPort == -1:
raise ParameterInvalidException("local port should not be -1")
@@ -279,7 +285,7 @@ class Tester(Crb):
def get_mac(self, localPort):
"""
- Return tester local port mac address.
+ Return TG local port mac address.
"""
if localPort == -1:
raise ParameterInvalidException("local port should not be -1")
@@ -390,41 +396,41 @@ class Tester(Crb):
cached_ports_info.append(port_info)
self.serializer.save(self.PORT_INFO_CACHE_KEY, cached_ports_info)
- def _scan_pktgen_ports(self):
- """packet generator port setting
- Currently, trex run on tester node
+ def _scan_tg_ports(self):
+ """traffic generator port setting
+        Currently, trex runs on the TG node
"""
new_ports_info = []
- pktgen_ports_info = self.pktgen.get_ports()
- for pktgen_port_info in pktgen_ports_info:
- pktgen_port_type = pktgen_port_info["type"]
- if pktgen_port_type.lower() == "ixia":
- self.ports_info.extend(pktgen_ports_info)
+ tg_ports_info = self.perf_tg.get_ports()
+ for tg_port_info in tg_ports_info:
+ tg_port_type = tg_port_info["type"]
+ if tg_port_type.lower() == "ixia":
+ self.ports_info.extend(tg_ports_info)
break
- pktgen_port_name = pktgen_port_info["intf"]
- pktgen_pci = pktgen_port_info["pci"]
- pktgen_mac = pktgen_port_info["mac"]
+ tg_port_name = tg_port_info["intf"]
+ tg_pci = tg_port_info["pci"]
+ tg_mac = tg_port_info["mac"]
for port_info in self.ports_info:
dts_pci = port_info["pci"]
- if dts_pci != pktgen_pci:
+ if dts_pci != tg_pci:
continue
- port_info["intf"] = pktgen_port_name
- port_info["type"] = pktgen_port_type
- port_info["mac"] = pktgen_mac
+ port_info["intf"] = tg_port_name
+ port_info["type"] = tg_port_type
+ port_info["mac"] = tg_mac
break
- # Since tester port scanning work flow change, non-functional port
- # mapping config will be ignored. Add tester port mapping if no
+            # Since the TG port scanning workflow changed, non-functional port
+            # mapping config will be ignored. Add a TG port mapping if no
# port in ports info
else:
- addr_array = pktgen_pci.split(":")
+ addr_array = tg_pci.split(":")
port = GetNicObj(self, addr_array[0], addr_array[1], addr_array[2])
new_ports_info.append(
{
"port": port,
- "intf": pktgen_port_name,
- "type": pktgen_port_type,
- "pci": pktgen_pci,
- "mac": pktgen_mac,
+ "intf": tg_port_name,
+ "type": tg_port_type,
+ "pci": tg_pci,
+ "mac": tg_mac,
"ipv4": None,
"ipv6": None,
}
@@ -434,7 +440,7 @@ class Tester(Crb):
def scan_ports(self):
"""
- Scan all ports on tester and save port's pci/mac/interface.
+ Scan all ports on TG and save port's pci/mac/interface.
"""
if self.read_cache:
self.load_serializer_ports()
@@ -443,8 +449,8 @@ class Tester(Crb):
if not self.read_cache or self.ports_info is None:
self.scan_ports_uncached()
if self.it_uses_external_generator():
- if self.is_pktgen:
- self._scan_pktgen_ports()
+ if self.uses_perf_tg:
+ self._scan_tg_ports()
self.save_serializer_ports()
for port_info in self.ports_info:
@@ -467,21 +473,21 @@ class Tester(Crb):
intf = port.get_interface_name()
self.logger.info(
- "Tester cached: [000:%s %s] %s"
+ "TG cached: [000:%s %s] %s"
% (port_info["pci"], port_info["type"], intf)
)
port_info["port"] = port
def scan_ports_uncached(self):
"""
- Return tester port pci/mac/interface information.
+ Return TG port pci/mac/interface information.
"""
self.ports_info = []
for (pci_bus, pci_id) in self.pci_devices_info:
# ignore unknown card types
if pci_id not in list(NICS.values()):
- self.logger.info("Tester: [%s %s] %s" % (pci_bus, pci_id, "unknow_nic"))
+                self.logger.info("TG: [%s %s] %s" % (pci_bus, pci_id, "unknown_nic"))
continue
addr_array = pci_bus.split(":")
@@ -494,11 +500,11 @@ class Tester(Crb):
if "No such file" in intf:
self.logger.info(
- "Tester: [%s %s] %s" % (pci_bus, pci_id, "unknow_interface")
+                    "TG: [%s %s] %s" % (pci_bus, pci_id, "unknown_interface")
)
continue
- self.logger.info("Tester: [%s %s] %s" % (pci_bus, pci_id, intf))
+ self.logger.info("TG: [%s %s] %s" % (pci_bus, pci_id, intf))
macaddr = port.get_mac_addr()
ipv6 = port.get_ipv6_addr()
@@ -523,7 +529,7 @@ class Tester(Crb):
intf = port.get_interface2_name()
- self.logger.info("Tester: [%s %s] %s" % (pci_bus, pci_id, intf))
+ self.logger.info("TG: [%s %s] %s" % (pci_bus, pci_id, intf))
macaddr = port.get_intf2_mac_addr()
ipv6 = port.get_ipv6_addr()
@@ -540,15 +546,15 @@ class Tester(Crb):
}
)
- def pktgen_init(self):
+ def perf_tg_init(self):
"""
- initialize packet generator instance
+ initialize traffic generator instance
"""
- pktgen_type = self.crb[PKTGEN]
- # init packet generator instance
- self.pktgen = getPacketGenerator(self, pktgen_type)
+ tg_type = self.node[PERF_TG_CONF_KEY]
+ # init traffic generator instance
+ self.perf_tg = get_perf_tg(self, tg_type)
# prepare running environment
- self.pktgen.prepare_generator()
+ self.perf_tg.prepare_generator()
def send_ping(self, localPort, ipv4, mac):
"""
@@ -568,9 +574,9 @@ class Tester(Crb):
"""
Send ping6 packet from local port with destination ipv6 address.
"""
- if self.is_pktgen:
+ if self.uses_perf_tg:
if self.ports_info[localPort]["type"].lower() in "ixia":
- return self.pktgen.send_ping6(
+ return self.perf_tg.send_ping6(
self.ports_info[localPort]["pci"], mac, ipv6
)
elif self.ports_info[localPort]["type"].lower() == "trex":
@@ -585,7 +591,7 @@ class Tester(Crb):
def get_port_numa(self, port):
"""
- Return tester local port numa.
+ Return TG local port numa.
"""
pci = self.ports_info[port]["pci"]
out = self.send_expect("cat /sys/bus/pci/devices/%s/numa_node" % pci, "#")
@@ -683,13 +689,13 @@ class Tester(Crb):
return out
- def parallel_transmit_ptks(self, pkt=None, intf="", send_times=1, interval=0.01):
+ def parallel_transmit_ptks(self, scapy_pkt_builder=None, intf="", send_times=1, interval=0.01):
"""
Callable function for parallel processes
"""
print(GREEN("Transmitting and sniffing packets, please wait few minutes..."))
- return pkt.send_pkt_bg_with_pcapfile(
- crb=self, tx_port=intf, count=send_times, loop=0, inter=interval
+ return scapy_pkt_builder.send_pkt_bg_with_pcapfile(
+ node=self, tx_port=intf, count=send_times, loop=0, inter=interval
)
def check_random_pkts(
@@ -714,8 +720,8 @@ class Tester(Crb):
self.logger.info(
GREEN("Preparing transmit packets, please wait few minutes...")
)
- pkt = Packet()
- pkt.generate_random_pkts(
+ scapy_pkt_builder = ScapyPacketBuilder()
+ scapy_pkt_builder.generate_random_pkts(
pktnum=pktnum,
random_type=random_type,
ip_increase=True,
@@ -723,7 +729,7 @@ class Tester(Crb):
options={"layers_config": params},
)
- tx_pkts[txport] = pkt
+ tx_pkts[txport] = scapy_pkt_builder
# sniff packets
inst = start_tcpdump(
self,
@@ -740,7 +746,7 @@ class Tester(Crb):
txIntf = self.get_interface(txport)
bg_sessions.append(
self.parallel_transmit_ptks(
- pkt=tx_pkts[txport], intf=txIntf, send_times=1, interval=interval
+ scapy_pkt_builder=tx_pkts[txport], intf=txIntf, send_times=1, interval=interval
)
)
# Verify all packets
@@ -761,17 +767,17 @@ class Tester(Crb):
self.logger.info(
"exceeded timeout, force to stop background packet sending to avoid dead loop"
)
- Packet.stop_send_pkt_bg(i)
+ ScapyPacketBuilder.stop_send_pkt_bg(i)
prev_id = -1
for txport, rxport in portList:
p = stop_and_load_tcpdump_packets(rx_inst[rxport])
- recv_pkts = p.pktgen.pkts
+ recv_pkts = p.scapy_pkt_util.pkts
# only report when received number not matched
- if len(tx_pkts[txport].pktgen.pkts) > len(recv_pkts):
+ if len(tx_pkts[txport].scapy_pkt_util.pkts) > len(recv_pkts):
self.logger.info(
(
"Pkt number not matched,%d sent and %d received\n"
- % (len(tx_pkts[txport].pktgen.pkts), len(recv_pkts))
+ % (len(tx_pkts[txport].scapy_pkt_util.pkts), len(recv_pkts))
)
)
if allow_miss is False:
@@ -805,7 +811,7 @@ class Tester(Crb):
if (
compare_pktload(
- tx_pkts[txport].pktgen.pkts[idx], recv_pkts[idx], "L4"
+ tx_pkts[txport].scapy_pkt_util.pkts[idx], recv_pkts[idx], "L4"
)
is False
):
@@ -815,7 +821,7 @@ class Tester(Crb):
)
self.logger.info(
"Sent: %s"
- % strip_pktload(tx_pkts[txport].pktgen.pkts[idx], "L4")
+ % strip_pktload(tx_pkts[txport].scapy_pkt_util.pkts[idx], "L4")
)
self.logger.info("Recv: %s" % strip_pktload(recv_pkts[idx], "L4"))
return False
@@ -835,31 +841,31 @@ class Tester(Crb):
"""
Wrapper for packet module load_pcapfile
"""
- p = stop_and_load_tcpdump_packets(index, timeout=timeout)
- return p
+ scapy_pkt_builder = stop_and_load_tcpdump_packets(index, timeout=timeout)
+ return scapy_pkt_builder
def kill_all(self, killall=False):
"""
- Kill all scapy process or DPDK application on tester.
+ Kill all scapy process or DPDK application on TG.
"""
if not self.has_external_traffic_generator():
out = self.session.send_command("")
if ">>>" in out:
self.session.send_expect("quit()", "# ", timeout=3)
if killall:
- super(Tester, self).kill_all()
+ super(TrafficGeneratorNode, self).kill_all()
def close(self):
"""
Close ssh session and IXIA tcl session.
"""
if self.it_uses_external_generator():
- if self.is_pktgen and self.pktgen:
- self.pktgen.quit_generator()
+ if self.uses_perf_tg and self.perf_tg:
+ self.perf_tg.quit_generator()
# only restore ports if start trex in dts
- if "start_trex" in list(self.pktgen.conf.keys()):
+ if "start_trex" in list(self.perf_tg.conf.keys()):
self.restore_trex_interfaces()
- self.pktgen = None
+ self.perf_tg = None
if self.scapy_sessions_li:
for i in self.scapy_sessions_li:
@@ -875,9 +881,30 @@ class Tester(Crb):
self.session.close()
self.session = None
- def crb_exit(self):
+ def node_exit(self):
"""
- Close all resource before crb exit
+ Close all resource before node exit
"""
self.close()
self.logger.logger_exit()
+
+
+def get_perf_tg(tg_node, tg_type=TG_IXEXPLORER):
+ """
+ Get traffic generator object
+ """
+ tg_type = tg_type.lower()
+
+ tg_cls = {
+ TG_DPDK: DpdkTrafficGenerator,
+ TG_IXEXPLORER: IxExplorerTrafficGenerator,
+ TG_IXNETWORK: IxNetworkTrafficGenerator,
+ TG_TREX: TrexTrafficGenerator,
+ }
+
+ if tg_type in list(tg_cls.keys()):
+ CLS = tg_cls.get(tg_type)
+ return CLS(tg_node)
+ else:
+        msg = "traffic generator <{0}> is not supported".format(tg_type)
+ raise Exception(msg)
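As a usage note (illustrative sketch only, not part of the patch): the new module-level get_perf_tg() factory takes over the role of the removed getPacketGenerator() lookup. Assuming tg_node is an initialised TrafficGeneratorNode configured for TRex, the renamed flow looks roughly like:

    from framework.settings import TG_TREX
    from framework.tg_node import get_perf_tg

    perf_tg = get_perf_tg(tg_node, TG_TREX)  # returns a TrexTrafficGenerator
    perf_tg.prepare_generator()              # same prepare step perf_tg_init() performs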
diff --git a/framework/pktgen_base.py b/framework/tg_perf.py
similarity index 70%
rename from framework/pktgen_base.py
rename to framework/tg_perf.py
index 7704200a..68d7f881 100644
--- a/framework/pktgen_base.py
+++ b/framework/tg_perf.py
@@ -2,23 +2,24 @@
# Copyright(c) 2010-2021 Intel Corporation
#
-import logging
+import os
import time
from abc import abstractmethod
from copy import deepcopy
from enum import Enum, unique
from pprint import pformat
-from .config import PktgenConf
-from .logger import getLogger
+from scapy.fields import ConditionalField
+from scapy.packet import NoPayload
+from scapy.packet import Packet as scapyPacket
+from scapy.utils import rdpcap
-# packet generator name
-from .settings import PKTGEN, PKTGEN_DPDK, PKTGEN_IXIA, PKTGEN_IXIA_NETWORK, PKTGEN_TREX
+from .config import TrafficGeneratorConf
+from .logger import getLogger
-# macro definition
-TRANSMIT_CONT = "continuous"
-TRANSMIT_M_BURST = "multi_burst"
-TRANSMIT_S_BURST = "single_burst"
+# traffic generator name
+from .settings import PERF_TG_CONF_KEY, TG_IXNETWORK, TG_TREX, TRANSMIT_CONT
+from .utils import convert_int2ip, convert_ip2int, convert_mac2long, convert_mac2str
@unique
@@ -27,18 +28,18 @@ class STAT_TYPE(Enum):
TXRX = "txrx"
-class PacketGenerator(object):
+class TrafficGenerator(object):
"""
- Basic class for packet generator, define basic function for each kinds of
- generators
+ Basic class for traffic generator, define basic functions for each kind of
+ generator
"""
- def __init__(self, tester):
- self.logger = getLogger(PKTGEN)
- self.tester = tester
+ def __init__(self, tg_node):
+ self.logger = getLogger(PERF_TG_CONF_KEY)
+ self.tg_node = tg_node
self.__streams = []
self._ports_map = []
- self.pktgen_type = None
+ self.tg_type = None
def _prepare_generator(self):
raise NotImplementedError
@@ -49,21 +50,21 @@ class PacketGenerator(object):
def _get_port_pci(self, port_id):
raise NotImplementedError
- def _convert_pktgen_port(self, port_id):
+ def _convert_tg_port(self, port_id):
"""
:param port_id:
- index of a port in packet generator tool
+ index of a port in traffic generator tool
"""
try:
gen_pci = self._get_port_pci(port_id)
if not gen_pci:
msg = "can't get port {0} pci address".format(port_id)
raise Exception(msg)
- for port_idx, info in enumerate(self.tester.ports_info):
+ for port_idx, info in enumerate(self.tg_node.ports_info):
if "pci" not in info or info["pci"] == "N/A":
return -1
- tester_pci = info["pci"]
- if tester_pci == gen_pci:
+ tg_pci = info["pci"]
+ if tg_pci == gen_pci:
msg = "gen port {0} map test port {1}".format(port_id, port_idx)
self.logger.debug(msg)
return port_idx
@@ -74,21 +75,21 @@ class PacketGenerator(object):
return port
- def _get_gen_port(self, tester_pci):
+ def _get_gen_port(self, tg_pci):
raise NotImplementedError
- def _convert_tester_port(self, port_id):
+ def _convert_tg_node_port(self, port_id):
"""
:param port_id:
- index of a port in dts tester ports info
+ index of a port in dts TG ports info
"""
try:
- info = self.tester.ports_info[port_id]
+ info = self.tg_node.ports_info[port_id]
# limit to nic port, not including ixia port
if "pci" not in info or info["pci"] == "N/A":
return -1
- tester_pci = info["pci"]
- port = self._get_gen_port(tester_pci)
+ tg_pci = info["pci"]
+ port = self._get_gen_port(tg_pci)
msg = "test port {0} map gen port {1}".format(port_id, port)
self.logger.debug(msg)
except Exception as e:
@@ -97,13 +98,13 @@ class PacketGenerator(object):
return port
def add_stream(self, tx_port, rx_port, pcap_file):
- pktgen_tx_port = self._convert_tester_port(tx_port)
- pktgen_rx_port = self._convert_tester_port(rx_port)
+ tg_node_tx_port = self._convert_tg_node_port(tx_port)
+ tg_node_rx_port = self._convert_tg_node_port(rx_port)
stream_id = len(self.__streams)
stream = {
- "tx_port": pktgen_tx_port,
- "rx_port": pktgen_rx_port,
+ "tx_port": tg_node_tx_port,
+ "rx_port": tg_node_rx_port,
"pcap_file": pcap_file,
}
self.__streams.append(stream)
@@ -153,12 +154,12 @@ class PacketGenerator(object):
def reset_streams(self):
self.__streams = []
- def __warm_up_pktgen(self, stream_ids, options, delay):
+ def __warm_up_tg(self, stream_ids, options, delay):
"""run warm up traffic before start main traffic"""
if not delay:
return
- msg = "{1} packet generator: run traffic {0}s to warm up ... ".format(
- delay, self.pktgen_type
+ msg = "{1} traffic generator: run traffic {0}s to warm up ... ".format(
+ delay, self.tg_type
)
self.logger.info(msg)
self._start_transmission(stream_ids, options)
@@ -242,12 +243,12 @@ class PacketGenerator(object):
delay:
warm up time before start main traffic. If it is set, it will start
- a delay time traffic to make sure packet generator under good status.
+                a delay time traffic to make sure the traffic generator is in a good state.
Warm up flow is ignored by default.
interval:
a interval time of get throughput statistic (second)
- If set this key value, pktgen will return several throughput statistic
+                If this key value is set, the traffic generator will return several throughput statistic
data within a duration traffic. If not set this key value, only
return one statistic data. It is ignored by default.
@@ -270,19 +271,19 @@ class PacketGenerator(object):
callback = options.get("callback")
duration = options.get("duration") or 10
delay = options.get("delay")
- if self.pktgen_type == PKTGEN_TREX:
+ if self.tg_type == TG_TREX:
stat_type = options.get("stat_type") or STAT_TYPE.RX
else:
if options.get("stat_type") is not None:
msg = (
"'stat_type' option is only for trex, "
- "should not set when use other pktgen tools"
+                    "should not be set when using other traffic generator tools"
)
raise Exception(msg)
stat_type = STAT_TYPE.RX
self._prepare_transmission(stream_ids=stream_ids)
# start warm up traffic
- self.__warm_up_pktgen(stream_ids, options, delay)
+ self.__warm_up_tg(stream_ids, options, delay)
# main traffic
self._start_transmission(stream_ids, options)
# keep traffic within a duration time and get throughput statistic
@@ -305,7 +306,7 @@ class PacketGenerator(object):
throughput_stat_flag = options.get("throughput_stat_flag") or False
self._prepare_transmission(stream_ids=stream_ids)
# start warm up traffic
- self.__warm_up_pktgen(stream_ids, options, delay)
+ self.__warm_up_tg(stream_ids, options, delay)
# main traffic
self._start_transmission(stream_ids, options)
# keep traffic within a duration time
@@ -342,7 +343,7 @@ class PacketGenerator(object):
delay:
warm up time before start main traffic. If it is set, it will
- start a delay time traffic to make sure packet generator
+ start a delay time traffic to make sure traffic generator
under good status. Warm up flow is ignored by default.
duration:
@@ -384,7 +385,7 @@ class PacketGenerator(object):
delay:
warm up time before start main traffic. If it is set, it will
- start a delay time transmission to make sure packet generator
+ start a delay time transmission to make sure traffic generator
under correct status. Warm up flow is ignored by default.
duration:
@@ -394,7 +395,7 @@ class PacketGenerator(object):
duration = options.get("duration") or 10
self._prepare_transmission(stream_ids=stream_ids, latency=True)
# start warm up traffic
- self.__warm_up_pktgen(stream_ids, options, delay)
+ self.__warm_up_tg(stream_ids, options, delay)
# main traffic
self._start_transmission(stream_ids, options)
# keep traffic within a duration time
@@ -440,7 +441,7 @@ class PacketGenerator(object):
delay:
warm up time before start main traffic. If it is set, it will
- start a delay time traffic to make sure packet generator
+ start a delay time traffic to make sure traffic generator
under good status. Warm up flow is ignored by default.
duration:
@@ -534,7 +535,7 @@ class PacketGenerator(object):
options usage:
delay:
warm up time before start main traffic. If it is set, it will
- start a delay time traffic to make sure packet generator
+ start a delay time traffic to make sure traffic generator
under good status. Warm up flow is ignored by default.
duration:
@@ -552,7 +553,7 @@ class PacketGenerator(object):
accuracy :
dichotomy algorithm accuracy, default 0.001.
"""
- if self.pktgen_type == PKTGEN_IXIA_NETWORK:
+ if self.tg_type == TG_IXNETWORK:
return self._measure_rfc2544_ixnet(stream_ids, options)
max_rate = options.get("max_rate") or 100.0
@@ -566,7 +567,7 @@ class PacketGenerator(object):
_options = {"duration": duration}
if delay:
self._prepare_transmission(stream_ids=stream_ids)
- self.__warm_up_pktgen(stream_ids, _options, delay)
+ self.__warm_up_tg(stream_ids, _options, delay)
self._clear_streams()
# traffic parameters for dichotomy algorithm
loss_rate_table = []
@@ -641,7 +642,7 @@ class PacketGenerator(object):
def measure(self, stream_ids, traffic_opt):
"""
- use as an unify interface method for packet generator
+ use as an unify interface method for traffic generator
"""
method = traffic_opt.get("method")
if method == "throughput":
@@ -675,12 +676,13 @@ class PacketGenerator(object):
return self.__streams[stream_id]
def _get_generator_conf_instance(self):
- conf_inst = PktgenConf(self.pktgen_type)
- pktgen_inst_type = conf_inst.pktgen_conf.get_sections()
- if len(pktgen_inst_type) < 1:
+ conf_inst = TrafficGeneratorConf(self.tg_type)
+ tg_type = conf_inst.tg_conf.get_sections()
+ if len(tg_type) < 1:
msg = (
- "packet generator <{0}> has no configuration " "in pktgen.cfg"
- ).format(self.pktgen_type)
+ "traffic generator <{0}> has no configuration " +
+ "in traffic_generator.cfg"
+ ).format(self.tg_type)
raise Exception(msg)
return conf_inst
@@ -709,5 +711,161 @@ class PacketGenerator(object):
pass
-class DpdkPacketGenerator(PacketGenerator):
+class DpdkTrafficGenerator(TrafficGenerator):
pass # not implemented
+
+
+class TrafficGeneratorStream(object):
+ """default traffic generator stream option for all streams"""
+
+ default_opt = {
+ "stream_config": {
+ "txmode": {},
+ "transmit_mode": TRANSMIT_CONT,
+ # for temporary usage because current traffic generator design
+ # don't support port level configuration, here using stream
+ # configuration to pass rate percent
+ "rate": 100,
+ }
+ }
+
+ def __init__(self):
+ self.packetLayers = dict()
+
+ def _parse_packet_layer(self, pkt_object):
+ """parse one packet every layers' fields and value"""
+ if pkt_object == None:
+        if pkt_object is None:
+
+ self.packetLayers[pkt_object.name] = dict()
+ for curfield in pkt_object.fields_desc:
+ if isinstance(curfield, ConditionalField) and not curfield._evalcond(
+ pkt_object
+ ):
+ continue
+ field_value = pkt_object.getfieldval(curfield.name)
+ if isinstance(field_value, scapyPacket) or (
+ curfield.islist and curfield.holds_packets and type(field_value) is list
+ ):
+ continue
+ repr_value = curfield.i2repr(pkt_object, field_value)
+ if isinstance(repr_value, str):
+ repr_value = repr_value.replace(
+ os.linesep, os.linesep + " " * (len(curfield.name) + 4)
+ )
+ self.packetLayers[pkt_object.name][curfield.name] = repr_value
+
+ if isinstance(pkt_object.payload, NoPayload):
+ return
+ else:
+ self._parse_packet_layer(pkt_object.payload)
+
+ def _parse_pcap(self, pcapFile, number=0):
+ """parse one packet content"""
+ pcap_pkts = []
+        if not os.path.exists(pcapFile):
+            warning = "{0} does not exist!".format(pcapFile)
+ raise Exception(warning)
+
+ pcap_pkts = rdpcap(pcapFile)
+ # parse packets' every layers and fields
+ if len(pcap_pkts) == 0:
+ warning = "{0} is empty".format(pcapFile)
+ raise Exception(warning)
+ elif number >= len(pcap_pkts):
+ warning = "{0} is missing No.{1} packet".format(pcapFile, number)
+ raise Exception(warning)
+ else:
+ self._parse_packet_layer(pcap_pkts[number])
+
+ def _set_tg_fields_config(self, pcap, suite_config):
+ """
+ get default fields value from a pcap file and unify layer fields
+ variables for trex/ixia
+ """
+ self._parse_pcap(pcap)
+ if not self.packetLayers:
+ msg = "pcap content is empty"
+ raise Exception(msg)
+ # suite fields config convert to traffic generator fields config
+ fields_config = {}
+ # set ethernet protocol layer fields
+ layer_name = "mac"
+ if layer_name in list(suite_config.keys()) and "Ethernet" in self.packetLayers:
+ fields_config[layer_name] = {}
+ suite_fields = suite_config.get(layer_name)
+ pcap_fields = self.packetLayers.get("Ethernet")
+ for name, config in suite_fields.items():
+ action = config.get("action") or "default"
+ range = config.get("range") or 64
+ step = config.get("step") or 1
+ start_mac = pcap_fields.get(name)
+ end_mac = convert_mac2str(convert_mac2long(start_mac) + range - 1)
+ fields_config[layer_name][name] = {}
+ fields_config[layer_name][name]["start"] = start_mac
+ fields_config[layer_name][name]["end"] = end_mac
+ fields_config[layer_name][name]["step"] = step
+ fields_config[layer_name][name]["action"] = action
+ # set ip protocol layer fields
+ layer_name = "ip"
+ if layer_name in list(suite_config.keys()) and "IP" in self.packetLayers:
+ fields_config[layer_name] = {}
+ suite_fields = suite_config.get(layer_name)
+ pcap_fields = self.packetLayers.get("IP")
+ for name, config in suite_fields.items():
+ action = config.get("action") or "default"
+ range = config.get("range") or 64
+ step = config.get("step") or 1
+ start_ip = pcap_fields.get(name)
+ end_ip = convert_int2ip(convert_ip2int(start_ip) + range - 1)
+ fields_config[layer_name][name] = {}
+ fields_config[layer_name][name]["start"] = start_ip
+ fields_config[layer_name][name]["end"] = end_ip
+ fields_config[layer_name][name]["step"] = step
+ fields_config[layer_name][name]["action"] = action
+ # set vlan protocol layer fields, only support one layer vlan here
+ layer_name = "vlan"
+ if layer_name in list(suite_config.keys()) and "802.1Q" in self.packetLayers:
+ fields_config[layer_name] = {}
+ suite_fields = suite_config.get(layer_name)
+ pcap_fields = self.packetLayers.get("802.1Q")
+ # only support one layer vlan here, so set name to `0`
+ name = 0
+ if name in list(suite_fields.keys()):
+ config = suite_fields[name]
+ action = config.get("action") or "default"
+ range = config.get("range") or 64
+ # ignore 'L' suffix
+ if "L" in pcap_fields.get(layer_name):
+ start_vlan = int(pcap_fields.get(layer_name)[:-1])
+ else:
+ start_vlan = int(pcap_fields.get(layer_name))
+ end_vlan = start_vlan + range - 1
+ fields_config[layer_name][name] = {}
+ fields_config[layer_name][name]["start"] = start_vlan
+ fields_config[layer_name][name]["end"] = end_vlan
+ fields_config[layer_name][name]["step"] = 1
+ fields_config[layer_name][name]["action"] = action
+
+ return fields_config
+
+ def prepare_stream_from_tginput(
+ self, tgen_input, ratePercent, vm_config, tg_node
+ ):
+ """create streams for ports, one port one stream"""
+ # set stream in traffic generator
+ stream_ids = []
+ for config in tgen_input:
+ stream_id = tg_node.add_stream(*config)
+ pcap = config[2]
+ _options = deepcopy(self.default_opt)
+ _options["pcap"] = pcap
+ _options["stream_config"]["rate"] = ratePercent
+ # if vm is set
+ if vm_config:
+ _options["fields_config"] = self._set_tg_fields_config(
+ pcap, vm_config
+ )
+ tg_node.config_stream(stream_id, _options)
+ stream_ids.append(stream_id)
+ return stream_ids
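For illustration (not part of the patch), the new TrafficGeneratorStream helper keeps the old per-layer fields-config shape: each layer ("mac", "ip", "vlan") maps field names found in the pcap to an action/range/step triple. A hypothetical suite-side config and the call that turns it into streams could look like this, where tgen_input and perf_tg are assumed to come from the suite and the TG node respectively:

    # Hypothetical fields config; the "dst" field must exist in the pcap's IP
    # layer, everything else is derived from the pcap's first packet.
    vm_config = {
        "ip": {
            "dst": {"action": "inc", "range": 64, "step": 1},
        },
    }

    helper = TrafficGeneratorStream()
    stream_ids = helper.prepare_stream_from_tginput(
        tgen_input,  # list of (tx_port, rx_port, pcap_file) tuples
        100,         # rate percent applied to every stream
        vm_config,   # or None to keep the pcap's fixed field values
        perf_tg,     # TrafficGenerator instance (the signature names it tg_node)
    )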
diff --git a/framework/pktgen_trex.py b/framework/tg_trex.py
similarity index 95%
rename from framework/pktgen_trex.py
rename to framework/tg_trex.py
index 9ae27a1c..f180d16b 100644
--- a/framework/pktgen_trex.py
+++ b/framework/tg_trex.py
@@ -11,14 +11,14 @@ from pprint import pformat
from scapy.layers.inet import IP
from scapy.layers.l2 import Ether
-from .pktgen_base import (
- PKTGEN,
- PKTGEN_TREX,
+from .settings import (
+ PERF_TG_CONF_KEY,
+ TG_TREX,
TRANSMIT_CONT,
TRANSMIT_M_BURST,
TRANSMIT_S_BURST,
- PacketGenerator,
)
+from .tg_perf import TrafficGenerator
class TrexConfigVm(object):
@@ -397,15 +397,15 @@ class TrexConfigStream(object):
conn.add_streams(streams, ports=ports)
-class TrexPacketGenerator(PacketGenerator):
+class TrexTrafficGenerator(TrafficGenerator):
"""
- Trex packet generator, detail usage can be seen at
+ Trex traffic generator, detail usage can be seen at
https://trex-tgn.cisco.com/trex/doc/trex_manual.html
"""
- def __init__(self, tester):
- super(TrexPacketGenerator, self).__init__(tester)
- self.pktgen_type = PKTGEN_TREX
+ def __init__(self, tg_node):
+ super(TrexTrafficGenerator, self).__init__(tg_node)
+ self.tg_type = TG_TREX
self.trex_app = "t-rex-64"
self._conn = None
self.control_session = None
@@ -416,7 +416,7 @@ class TrexPacketGenerator(PacketGenerator):
self._rx_ports = []
conf_inst = self._get_generator_conf_instance()
- self.conf = conf_inst.load_pktgen_config()
+ self.conf = conf_inst.load_tg_config()
self.options_keys = ["txmode", "ip", "vlan", "transmit_mode", "rate"]
self.ip_keys = ["start", "end", "action", "mask", "step"]
@@ -455,11 +455,11 @@ class TrexPacketGenerator(PacketGenerator):
# set trex class
self.STLClient = STLClient
- # get configuration from pktgen config file
+ # get configuration from traffic generator config file
self._get_traffic_option()
def _get_traffic_option(self):
- """get configuration from pktgen config file"""
+ """get configuration from traffic generator config file"""
# set trex coremask
_core_mask = self.conf.get("core_mask")
if _core_mask:
@@ -495,7 +495,7 @@ class TrexPacketGenerator(PacketGenerator):
def _get_gen_port(self, pci):
"""
- get port management id of the packet generator
+ get port management id of the traffic generator
"""
for name, _port_obj in self._conn.ports.items():
_pci = _port_obj.info["pci_addr"]
@@ -506,7 +506,7 @@ class TrexPacketGenerator(PacketGenerator):
def _is_gen_port(self, pci):
"""
- check if a pci address is managed by the packet generator
+ check if a pci address is managed by the traffic generator
"""
for name, _port_obj in self._conn.ports.items():
_pci = _port_obj.info["pci_addr"]
@@ -536,7 +536,7 @@ class TrexPacketGenerator(PacketGenerator):
return ports
def _clear_streams(self):
- """clear streams in trex and `PacketGenerator`"""
+ """clear streams in trex and `TrafficGenerator`"""
# if streams has been attached, remove them from trex server.
self._remove_all_streams()
@@ -559,7 +559,7 @@ class TrexPacketGenerator(PacketGenerator):
self._conn = None
def _check_options(self, opts={}):
- return True # close it and wait for more discussion about pktgen framework
+ return True # close it and wait for more discussion about traffic generator framework
for key in opts:
if key in self.options_keys:
if key == "ip":
@@ -601,12 +601,12 @@ class TrexPacketGenerator(PacketGenerator):
flow_control_opt = "--no-flow-control-change" if flow_control else ""
for key in self.conf:
- # key, value = pktgen_conf
+ # key, value = tg_conf
if key == "config_file":
app_param_temp = app_param_temp + " --cfg " + self.conf[key]
elif key == "core_num":
app_param_temp = app_param_temp + " -c " + self.conf[key]
- self.control_session = self.tester.create_session(PKTGEN)
+ self.control_session = self.tg_node.create_session(PERF_TG_CONF_KEY)
self.control_session.send_expect(
";".join(
[
@@ -625,7 +625,7 @@ class TrexPacketGenerator(PacketGenerator):
@property
def _vm_conf(self):
- return None # close it and wait for more discussion about pktgen framework
+ return None # close it and wait for more discussion about traffic generator framework
conf = {}
# get the subnet range of src and dst ip
if "ip_src" in self.conf:
@@ -775,23 +775,23 @@ class TrexPacketGenerator(PacketGenerator):
config = {}
config.update(options)
# since trex stream rate percent haven't taken effect, here use one
- # stream rate percent as port rate percent. In pktgen, all streams
- # rate percent are the same value by design. flow control option is
- # the same.
+ # stream rate percent as port rate percent. In traffic generator,
+ # all streams rate percent are the same value by design.
+ # flow control option is the same.
stream_config = options.get("stream_config") or {}
self._traffic_opt["rate"] = stream_config.get("rate") or 100
if stream_config.get("pps"): # reserve feature
self._traffic_opt["pps"] = stream_config.get("pps")
# flow control option is deployed on all ports by design
self._traffic_opt["flow_control"] = options.get("flow_control") or {}
- # if vm config by pktgen config file, set it here to take the place
- # of user setting
+            # if the vm config comes from the traffic generator config file,
+            # set it here to take the place of the user setting
if self._vm_conf:
config["fields_config"] = self._vm_conf
port_config[tx_port].append(config)
if not port_config:
- msg = "no stream options for trex packet generator"
+ msg = "no stream options for trex traffic generator"
raise Exception(msg)
self._conn.connect()
@@ -875,7 +875,7 @@ class TrexPacketGenerator(PacketGenerator):
if self._conn is not None:
self._disconnect()
if self.control_session is not None:
- self.tester.alt_session.send_expect("pkill -f _t-rex-64", "# ")
+ self.tg_node.alt_session.send_expect("pkill -f _t-rex-64", "# ")
time.sleep(5)
- self.tester.destroy_session(self.control_session)
+ self.tg_node.destroy_session(self.control_session)
self.control_session = None
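To show how the renamed pieces are driven end to end (an illustrative sketch with example option values, not part of the patch): a suite configures streams on the generator and then calls measure(), which dispatches on the "method" key as in tg_perf.py above:

    # stream_ids is assumed to come from perf_tg.add_stream()/config_stream() calls
    traffic_opt = {
        "method": "throughput",  # dispatched by TrafficGenerator.measure()
        "duration": 10,          # seconds of main traffic
        "delay": 5,              # optional warm-up, handled by __warm_up_tg()
    }
    result = perf_tg.measure(stream_ids, traffic_opt)  # throughput statistics from the backend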
diff --git a/framework/utils.py b/framework/utils.py
index 6378940d..3c38cfbd 100644
--- a/framework/utils.py
+++ b/framework/utils.py
@@ -16,13 +16,13 @@ from functools import wraps
DTS_ENV_PAT = r"DTS_*"
-def create_parallel_locks(num_duts):
+def create_parallel_locks(num_suts):
"""
- Create thread lock dictionary based on DUTs number
+ Create thread lock dictionary based on SUTs number
"""
global locks_info
locks_info = []
- for _ in range(num_duts):
+ for _ in range(num_suts):
lock_info = dict()
lock_info["update_lock"] = threading.RLock()
locks_info.append(lock_info)
@@ -32,7 +32,7 @@ def parallel_lock(num=1):
"""
Wrapper function for protect parallel threads, allow multiple threads
share one lock. Locks are created based on function name. Thread locks are
- separated between duts according to argument 'dut_id'.
+ separated between SUTs according to argument 'sut_id'.
Parameter:
num: Number of parallel threads for the lock
"""
@@ -41,16 +41,16 @@ def parallel_lock(num=1):
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
- if "dut_id" in kwargs:
- dut_id = kwargs["dut_id"]
+ if "sut_id" in kwargs:
+ sut_id = kwargs["sut_id"]
else:
- dut_id = 0
+ sut_id = 0
# in case function arguments is not correct
- if dut_id >= len(locks_info):
- dut_id = 0
+ if sut_id >= len(locks_info):
+ sut_id = 0
- lock_info = locks_info[dut_id]
+ lock_info = locks_info[sut_id]
uplock = lock_info["update_lock"]
name = func.__name__
@@ -70,8 +70,8 @@ def parallel_lock(num=1):
if lock._is_owned():
print(
RED(
- "DUT%d %s waiting for func lock %s"
- % (dut_id, threading.current_thread().name, func.__name__)
+ "SUT%d %s waiting for func lock %s"
+ % (sut_id, threading.current_thread().name, func.__name__)
)
)
lock.acquire()
@@ -151,13 +151,13 @@ def get_obj_funcs(obj, func_name_regex):
@parallel_lock()
-def remove_old_rsa_key(crb, ip):
+def remove_old_rsa_key(node, ip):
"""
- Remove the old RSA key of specified IP on crb.
+ Remove the old RSA key of specified IP on node.
"""
rsa_key_path = "~/.ssh/known_hosts"
remove_rsa_key_cmd = "sed -i '/%s/d' %s" % (ip, rsa_key_path)
- crb.send_expect(remove_rsa_key_cmd, "# ")
+ node.send_expect(remove_rsa_key_cmd, "# ")
def human_read_number(num):
@@ -273,9 +273,9 @@ def get_backtrace_object(file_name, obj_name):
return obj
-def check_crb_python_version(crb):
+def check_node_python_version(node):
cmd = "python3 -V"
- out = crb.send_expect(cmd, "#", 5)
+ out = node.send_expect(cmd, "#", 5)
pat = "Python (\d+).(\d+).(\d+)"
result = re.findall(pat, out)
if (
@@ -284,14 +284,14 @@ def check_crb_python_version(crb):
or (int(result[0][0]) == 3 and int(result[0][1]) < 6)
or (int(result[0][0]) == 3 and int(result[0][1]) == 6 and int(result[0][2]) < 9)
):
- crb.logger.warning(
+ node.logger.warning(
(
- "WARNING: Tester node python version is lower than python 3.6, "
+ "WARNING: TG node python version is lower than python 3.6, "
"it is deprecated for use in DTS, "
"and will not work in future releases."
)
)
- crb.logger.warning("Please use Python >= 3.6.9 instead")
+ node.logger.warning("Please use Python >= 3.6.9 instead")
def check_dts_python_version():
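
The renamed create_parallel_locks()/parallel_lock() helpers above implement a per-SUT locking pattern; the decorator below is a simplified, standalone illustration of that idea only (the framework version additionally keys locks by function name and allows a configurable number of parallel holders):

    import threading

    # One RLock per SUT, selected by the 'sut_id' keyword argument.
    locks_info = [{"update_lock": threading.RLock()} for _ in range(2)]  # two SUTs

    def with_sut_lock(func):
        def wrapper(*args, **kwargs):
            sut_id = kwargs.get("sut_id", 0)
            if sut_id >= len(locks_info):  # guard against bad arguments
                sut_id = 0
            with locks_info[sut_id]["update_lock"]:
                return func(*args, **kwargs)
        return wrapper

    @with_sut_lock
    def touch_ports(sut_id=0):
        return "ports of SUT%d updated" % sut_id

    print(touch_ports(sut_id=1))
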
diff --git a/framework/virt_base.py b/framework/virt_base.py
index a7fc8c4e..5717ba23 100644
--- a/framework/virt_base.py
+++ b/framework/virt_base.py
@@ -12,10 +12,10 @@ import framework.exception as exception
import framework.utils as utils
from .config import VIRTCONF, VirtConf
-from .dut import Dut
from .logger import getLogger
from .settings import CONFIG_ROOT_PATH
-from .virt_dut import VirtDut
+from .sut_node import SutNode
+from .virt_sut import VirtSut
ST_NOTSTART = "NOTSTART"
ST_PAUSE = "PAUSE"
@@ -32,32 +32,32 @@ class VirtBase(object):
function, we can get and set the VM boot command, and instantiate the VM.
"""
- def __init__(self, dut, vm_name, suite_name):
+ def __init__(self, sut, vm_name, suite_name):
"""
Initialize the VirtBase.
- dut: the instance of Dut
+ sut: the instance of SutNode
vm_name: the name of VM which you have configured in the configure
suite_name: the name of test suite
"""
- self.host_dut = dut
+ self.host_sut = sut
self.vm_name = vm_name
self.suite = suite_name
# indicate whether the current vm is migration vm
self.migration_vm = False
# create self used host session, need close it later
- self.host_session = self.host_dut.new_session(self.vm_name)
+ self.host_session = self.host_sut.new_session(self.vm_name)
- self.host_logger = self.host_dut.logger
- # base_dir existed for host dut has prepared it
- self.host_session.send_expect("cd %s" % self.host_dut.base_dir, "# ")
+ self.host_logger = self.host_sut.logger
+ # base_dir already exists because the host SUT has prepared it
+ self.host_session.send_expect("cd %s" % self.host_sut.base_dir, "# ")
# init the host resource pool for VM
- self.virt_pool = self.host_dut.virt_pool
+ self.virt_pool = self.host_sut.virt_pool
if not self.has_virtual_ability():
if not self.enable_virtual_ability():
- raise Exception("Dut [ %s ] cannot have the virtual ability!!!")
+ raise Exception("SUT [ %s ] cannot have the virtual ability!!!")
self.virt_type = self.get_virt_type()
@@ -88,7 +88,7 @@ class VirtBase(object):
def enable_virtual_ability(self):
"""
- Enable the virtual ability on the DUT.
+ Enable the virtual ability on the SUT.
"""
NotImplemented
@@ -217,7 +217,7 @@ class VirtBase(object):
def generate_unique_mac(self):
"""
- Generate a unique MAC based on the DUT.
+ Generate a unique MAC based on the SUT.
"""
mac_head = "00:00:00:"
mac_tail = ":".join(
@@ -275,7 +275,7 @@ class VirtBase(object):
def start(self, load_config=True, set_target=True, cpu_topo="", bind_dev=True):
"""
- Start VM and instantiate the VM with VirtDut.
+ Start VM and instantiate the VM with VirtSut.
"""
try:
if load_config is True:
@@ -287,12 +287,12 @@ class VirtBase(object):
self._start_vm()
if self.vm_status is ST_RUNNING:
- # connect vm dut and init running environment
- vm_dut = self.instantiate_vm_dut(
+ # connect vm SUT and init running environment
+ vm_sut = self.instantiate_vm_sut(
set_target, cpu_topo, bind_dev=bind_dev, autodetect_topo=True
)
else:
- vm_dut = None
+ vm_sut = None
except Exception as vm_except:
if self.handle_exception(vm_except):
@@ -304,7 +304,7 @@ class VirtBase(object):
self.callback()
return None
- return vm_dut
+ return vm_sut
def quick_start(self, load_config=True, set_target=True, cpu_topo=""):
"""
@@ -337,8 +337,8 @@ class VirtBase(object):
if self.vm_status is ST_PAUSE:
# flag current vm is migration vm
self.migration_vm = True
- # connect backup vm dut and it just inherited from host
- vm_dut = self.instantiate_vm_dut(
+ # connect the backup vm SUT; it is just inherited from the host
+ vm_sut = self.instantiate_vm_sut(
set_target, cpu_topo, bind_dev=False, autodetect_topo=False
)
except Exception as vm_except:
@@ -349,7 +349,7 @@ class VirtBase(object):
return None
- return vm_dut
+ return vm_sut
def handle_exception(self, vm_except):
# show exception back trace
@@ -369,13 +369,13 @@ class VirtBase(object):
elif type(vm_except) is exception.StartVMFailedException:
# start vm failure
return True
- elif type(vm_except) is exception.VirtDutConnectException:
+ elif type(vm_except) is exception.VirtSutConnectException:
# need stop vm
self._stop_vm()
return True
- elif type(vm_except) is exception.VirtDutInitException:
+ elif type(vm_except) is exception.VirtSutInitException:
# need close session
- vm_except.vm_dut.close()
+ vm_except.vm_sut.close()
# need stop vm
self.stop()
return True
@@ -404,82 +404,82 @@ class VirtBase(object):
if "disk" in list(self.params[i].keys()):
value = self.params[i]["disk"][0]
if "file" in list(value.keys()):
- host_ip = self.host_dut.get_ip_address()
+ host_ip = self.host_sut.get_ip_address()
return (
host_ip
+ ":"
- + self.host_dut.test_classname
+ + self.host_sut.test_classname
+ ":"
+ value["file"]
)
return None
- def instantiate_vm_dut(
+ def instantiate_vm_sut(
self, set_target=True, cpu_topo="", bind_dev=True, autodetect_topo=True
):
"""
- Instantiate the Dut class for VM.
+ Instantiate the SUT class for VM.
"""
- crb = self.host_dut.crb.copy()
- crb["bypass core0"] = False
+ node = self.host_sut.node.copy()
+ node["bypass core0"] = False
vm_ip = self.get_vm_ip()
- crb["IP"] = vm_ip
- crb["My IP"] = vm_ip
+ node["IP"] = vm_ip
+ node["My IP"] = vm_ip
username, password = self.get_vm_login()
- crb["user"] = username
- crb["pass"] = password
+ node["user"] = username
+ node["pass"] = password
- serializer = self.host_dut.serializer
+ serializer = self.host_sut.serializer
try:
- vm_dut = VirtDut(
+ vm_sut = VirtSut(
self,
- crb,
+ node,
serializer,
self.virt_type,
self.vm_name,
self.suite,
cpu_topo,
- dut_id=self.host_dut.dut_id,
+ sut_id=self.host_sut.sut_id,
)
except Exception as vm_except:
self.handle_exception(vm_except)
- raise exception.VirtDutConnectException
+ raise exception.VirtSutConnectException
return None
- vm_dut.nic_type = "any"
- vm_dut.tester = self.host_dut.tester
- vm_dut.host_session = self.host_session
- vm_dut.init_log()
- vm_dut.migration_vm = self.migration_vm
+ vm_sut.nic_type = "any"
+ vm_sut.tg_node = self.host_sut.tg_node
+ vm_sut.host_session = self.host_session
+ vm_sut.init_log()
+ vm_sut.migration_vm = self.migration_vm
read_cache = False
- skip_setup = self.host_dut.skip_setup
+ skip_setup = self.host_sut.skip_setup
vm_img = self.get_vm_img()
# if current vm is migration vm, skip compile dpdk
# if VM_IMG_list include the vm_img, it means the vm have complie the dpdk ok, skip it
if self.migration_vm or vm_img in VM_IMG_LIST:
skip_setup = True
- base_dir = self.host_dut.base_dir
- vm_dut.set_speedup_options(read_cache, skip_setup)
+ base_dir = self.host_sut.base_dir
+ vm_sut.set_speedup_options(read_cache, skip_setup)
# package and patch should be set before prerequisites
- vm_dut.set_package(self.host_dut.package, self.host_dut.patches)
+ vm_sut.set_package(self.host_sut.package, self.host_sut.patches)
# base_dir should be set before prerequisites
- vm_dut.set_directory(base_dir)
+ vm_sut.set_directory(base_dir)
try:
# setting up dpdk in vm, must call at last
- vm_dut.target = self.host_dut.target
- vm_dut.prerequisites(
- self.host_dut.package, self.host_dut.patches, autodetect_topo
+ vm_sut.target = self.host_sut.target
+ vm_sut.prerequisites(
+ self.host_sut.package, self.host_sut.patches, autodetect_topo
)
if set_target:
- target = self.host_dut.target
- vm_dut.set_target(target, bind_dev, self.def_driver, self.driver_mode)
+ target = self.host_sut.target
+ vm_sut.set_target(target, bind_dev, self.def_driver, self.driver_mode)
except:
- raise exception.VirtDutInitException(vm_dut)
+ raise exception.VirtSutInitException(vm_sut)
return None
# after prerequisites and set_target, the dpdk compile is ok, add this vm img to list
@@ -488,8 +488,8 @@ class VirtBase(object):
VM_IMG_LIST.append(vm_img)
mutex_vm_list.release()
- self.vm_dut = vm_dut
- return vm_dut
+ self.vm_sut = vm_sut
+ return vm_sut
def stop(self):
"""
@@ -508,16 +508,16 @@ class VirtBase(object):
self.host_session.close()
self.host_session = None
- # vm_dut may not init in migration case
- if getattr(self, "vm_dut", None):
+ # vm_sut may not init in migration case
+ if getattr(self, "vm_sut", None):
if self.vm_status is ST_RUNNING:
- self.vm_dut.close()
+ self.vm_sut.close()
else:
# when vm is not running, not close session forcely
- self.vm_dut.close(force=True)
+ self.vm_sut.close(force=True)
- self.vm_dut.logger.logger_exit()
- self.vm_dut = None
+ self.vm_sut.logger.logger_exit()
+ self.vm_sut = None
def register_exit_callback(self, callback):
"""
diff --git a/framework/virt_common.py b/framework/virt_common.py
index 348aea38..7b0d2693 100644
--- a/framework/virt_common.py
+++ b/framework/virt_common.py
@@ -10,7 +10,7 @@ from .qemu_libvirt import LibvirtKvm
from .settings import CONFIG_ROOT_PATH
-def VM(dut, vm_name, suite_name):
+def VM(sut, vm_name, suite_name):
conf = VirtConf(CONFIG_ROOT_PATH + os.sep + suite_name + ".cfg")
conf.load_virt_config(vm_name)
local_conf = conf.get_virt_config()
@@ -21,8 +21,8 @@ def VM(dut, vm_name, suite_name):
virt_type = param["virt_type"][0]["virt_type"]
if virt_type == "KVM":
- return QEMUKvm(dut, vm_name, suite_name)
+ return QEMUKvm(sut, vm_name, suite_name)
elif virt_type == "LIBVIRT":
- return LibvirtKvm(dut, vm_name, suite_name)
+ return LibvirtKvm(sut, vm_name, suite_name)
else:
raise Exception("Virt type %s is not supported!" % virt_type)
diff --git a/framework/virt_resource.py b/framework/virt_resource.py
index 3bc4b87b..e9c1c65d 100644
--- a/framework/virt_resource.py
+++ b/framework/virt_resource.py
@@ -17,21 +17,21 @@ QuickScan = True
class VirtResource(object):
"""
- Class handle dut resource, like cpu, memory, net devices
+ Class handling SUT resources, like cpu, memory and net devices
"""
- def __init__(self, dut):
- self.dut = dut
+ def __init__(self, sut_node):
+ self.sut_node = sut_node
- self.cores = [int(core["thread"]) for core in dut.cores]
+ self.cores = [int(core["thread"]) for core in sut_node.cores]
# initialized unused cores
self.unused_cores = self.cores[:]
# initialized used cores
self.used_cores = [-1] * len(self.unused_cores)
- self.ports_info = dut.ports_info
+ self.ports_info = sut_node.ports_info
# initialized unused ports
- self.ports = [port["pci"] for port in dut.ports_info]
+ self.ports = [port["pci"] for port in sut_node.ports_info]
self.unused_ports = self.ports[:]
# initialized used ports
self.used_ports = ["unused"] * len(self.unused_ports)
@@ -94,12 +94,12 @@ class VirtResource(object):
self.used_cores[index] = -1
def __core_on_socket(self, core, socket):
- for dut_core in self.dut.cores:
- if int(dut_core["thread"]) == core:
+ for sut_core in self.sut_node.cores:
+ if int(sut_core["thread"]) == core:
if socket == -1:
return True
- if int(dut_core["socket"]) == socket:
+ if int(sut_core["socket"]) == socket:
return True
else:
return False
@@ -174,15 +174,15 @@ class VirtResource(object):
def __vm_has_resource(self, vm, resource=""):
if vm == "":
- self.dut.logger.info("VM name can't be NULL!!!")
+ self.sut_node.logger.info("VM name can't be NULL!!!")
raise Exception("VM name can't be NULL!!!")
if vm not in self.allocated_info:
- self.dut.logger.info("There is no resource allocated to VM [%s]." % vm)
+ self.sut_node.logger.info("There is no resource allocated to VM [%s]." % vm)
return False
if resource == "":
return True
if resource not in self.allocated_info[vm]:
- self.dut.logger.info(
+ self.sut_node.logger.info(
"There is no resource [%s] allocated to VM [%s] " % (resource, vm)
)
return False
@@ -356,7 +356,7 @@ class VirtResource(object):
while True:
if (
- self.dut.check_port_occupied(port) is False
+ self.sut_node.check_port_occupied(port) is False
and self._check_port_allocated(port) is False
):
break
@@ -439,7 +439,7 @@ class VirtResource(object):
return self.allocated_info[vm]["ports"]
-class simple_dut(object):
+class simple_sut(object):
def __init__(self):
self.ports_info = []
self.cores = []
@@ -449,8 +449,8 @@ class simple_dut(object):
if __name__ == "__main__":
- dut = simple_dut()
- dut.cores = [
+ sut = simple_sut()
+ sut.cores = [
{"thread": "1", "socket": "0"},
{"thread": "2", "socket": "0"},
{"thread": "3", "socket": "0"},
@@ -465,7 +465,7 @@ if __name__ == "__main__":
{"thread": "12", "socket": "1"},
]
- dut.ports_info = [
+ sut.ports_info = [
{
"intf": "p786p1",
"source": "cfg",
@@ -508,7 +508,7 @@ if __name__ == "__main__":
},
]
- virt_pool = VirtResource(dut)
+ virt_pool = VirtResource(sut)
print("Alloc two PF devices on socket 1 from VM")
print(virt_pool.alloc_pf(vm="test1", number=2, socket=1))
diff --git a/framework/virt_scene.py b/framework/virt_scene.py
index 6cc22813..ef393b71 100644
--- a/framework/virt_scene.py
+++ b/framework/virt_scene.py
@@ -30,22 +30,22 @@ from .utils import create_mask
class VirtScene(object):
- def __init__(self, dut, tester, scene_name):
+ def __init__(self, sut, tg_node, scene_name):
self.name = scene_name
- self.host_dut = dut
- self.tester_dut = tester
- self.vm_dut = None
+ self.host_sut = sut
+ self.tg_sut = tg_node
+ self.vm_sut = None
self.pre_cmds = []
self.post_cmds = []
- self.vm_dut_enable = False
+ self.vm_sut_enable = False
self.auto_portmap = True
self.vm_type = "kvm"
self.def_target = "x86_64-native-linuxapp-gcc"
self.host_bound = False
- # for vm dut init_log
- self.host_dut.test_classname = "dts"
+ # for vm SUT init_log
+ self.host_sut.test_classname = "dts"
def load_config(self):
try:
@@ -92,9 +92,9 @@ class VirtScene(object):
def prepare_suite(self, conf):
for param in conf:
- if "dut" in list(param.keys()):
- if param["dut"] == "vm_dut":
- self.vm_dut_enable = True
+ if "sut" in list(param.keys()):
+ if param["sut"] == "vm_sut":
+ self.vm_sut_enable = True
if "type" in list(param.keys()):
if param["type"] == "xen":
self.vm_type = "xen"
@@ -122,10 +122,10 @@ class VirtScene(object):
else:
target = self.def_target
- self.host_dut.set_target(target, bind_dev=True)
+ self.host_sut.set_target(target, bind_dev=True)
if opts["dpdk"] == "testpmd":
- self.pmdout = PmdOutput(self.host_dut)
+ self.pmdout = PmdOutput(self.host_sut)
cores = opts["cores"].split()
out = self.pmdout.start_testpmd(cores)
if "Error" in out:
@@ -142,16 +142,16 @@ class VirtScene(object):
cpus = cpu_conf["skipcores"].split()
# remove invalid configured core
for cpu in cpus:
- if int(cpu) not in self.host_dut.virt_pool.cores:
+ if int(cpu) not in self.host_sut.virt_pool.cores:
cpus.remove(cpu)
# create core mask for reserved cores
core_mask = create_mask(cpus)
# reserve those skipped cores
- self.host_dut.virt_pool.reserve_cpu(core_mask)
+ self.host_sut.virt_pool.reserve_cpu(core_mask)
if "numa" in list(cpu_conf.keys()):
if cpu_conf["numa"] == "auto":
- numa = self.host_dut.ports_info[0]["port"].socket
+ numa = self.host_sut.ports_info[0]["port"].socket
else:
numa = int(cpu_conf["numa"])
else:
@@ -176,9 +176,9 @@ class VirtScene(object):
pin_cores = cpu_conf["cpu_pin"].split()
if len(pin_cores):
- cores = self.host_dut.virt_pool.alloc_cpu(vm=vm_name, corelist=pin_cores)
+ cores = self.host_sut.virt_pool.alloc_cpu(vm=vm_name, corelist=pin_cores)
else:
- cores = self.host_dut.virt_pool.alloc_cpu(
+ cores = self.host_sut.virt_pool.alloc_cpu(
vm=vm_name, number=num, socket=numa
)
core_cfg = ""
@@ -223,7 +223,7 @@ class VirtScene(object):
params["device"][index] = new_param
for param in params["device"]:
- netdev = get_netdev(self.host_dut, param["opt_host"])
+ netdev = get_netdev(self.host_sut, param["opt_host"])
if netdev is not None:
netdev.bind_driver("pci-stub")
@@ -231,9 +231,9 @@ class VirtScene(object):
pf_param = {}
# strip pf pci id
pf = int(param["pf_idx"])
- if pf >= len(self.host_dut.ports_info):
+ if pf >= len(self.host_sut.ports_info):
raise VirtDeviceCreateException
- pf_pci = self.host_dut.ports_info[pf]["pci"]
+ pf_pci = self.host_sut.ports_info[pf]["pci"]
pf_param["driver"] = "pci-assign"
pf_param["opt_host"] = pf_pci
if param["guestpci"] != "auto":
@@ -246,8 +246,8 @@ class VirtScene(object):
# strip vf pci id
if "pf_dev" in list(param.keys()):
pf = int(param["pf_dev"])
- pf_net = self.host_dut.ports_info[pf]["port"]
- vfs = self.host_dut.ports_info[pf]["vfs_port"]
+ pf_net = self.host_sut.ports_info[pf]["port"]
+ vfs = self.host_sut.ports_info[pf]["vfs_port"]
vf_idx = int(param["vf_idx"])
if vf_idx >= len(vfs):
raise VirtDeviceCreateException
@@ -267,7 +267,7 @@ class VirtScene(object):
command = {}
command["type"] = "host"
if not self.host_bound:
- intf = self.host_dut.ports_info[port]["intf"]
+ intf = self.host_sut.ports_info[port]["intf"]
command["command"] = "ifconfig %s up" % intf
self.reg_postvm_cmds(command)
@@ -284,7 +284,7 @@ class VirtScene(object):
try:
print(utils.GREEN("create vf %d %d %s" % (port, vf_num, driver)))
- self.host_dut.generate_sriov_vfs_by_port(port, vf_num, driver)
+ self.host_sut.generate_sriov_vfs_by_port(port, vf_num, driver)
self.reset_pf_cmds(port)
except:
print(utils.RED("Failed to create vf as requested!!!"))
@@ -296,16 +296,16 @@ class VirtScene(object):
try:
print(utils.GREEN("destroy vfs on port %d" % port))
- self.host_dut.destroy_sriov_vfs_by_port(port)
+ self.host_sut.destroy_sriov_vfs_by_port(port)
except:
print(utils.RED("Failed to destroy vf as requested!!!"))
def reg_prevm_cmds(self, command):
"""
- command: {'type':'host/tester/vm',
- define which crb command progress
+ command: {'type':'host/tg/vm',
+ define which node runs the command
'command':'XXX',
- command send to crb
+ command sent to the node
'expect':'XXX',
expected output for command
'timeout': 60,
@@ -320,11 +320,11 @@ class VirtScene(object):
if cmd["type"] == "vm":
print(utils.RED("Can't run vm command when vm not ready"))
elif cmd["type"] == "host":
- crb = self.host_dut
- elif cmd["type"] == "tester":
- crb = self.tester_dut
+ node = self.host_sut
+ elif cmd["type"] == "tg":
+ node = self.tg_sut
else:
- crb = self.host_dut
+ node = self.host_sut
if "expect" not in list(cmd.keys()):
expect = "# "
@@ -341,7 +341,7 @@ class VirtScene(object):
else:
timeout = cmd["timeout"]
- ret = crb.send_expect(
+ ret = node.send_expect(
cmd["command"], expect, timeout=timeout, verify=verify
)
@@ -351,10 +351,10 @@ class VirtScene(object):
def reg_postvm_cmds(self, command):
"""
- command: {'type':'host/tester/vm',
- define which crb command progress
+ command: {'type':'host/tg/vm',
+ define which node runs the command
'command':'XXX',
- command send to crb
+ command sent to the node
'expect':'XXX',
expected output for command
'verify':'yes or no'
@@ -366,13 +366,13 @@ class VirtScene(object):
def run_post_cmds(self):
for cmd in self.post_cmds:
if cmd["type"] == "vm":
- crb = self.vm_dut
+ node = self.vm_sut
elif cmd["type"] == "host":
- crb = self.host_dut
- elif cmd["type"] == "tester":
- crb = self.tester_dut
+ node = self.host_sut
+ elif cmd["type"] == "tg":
+ node = self.tg_sut
else:
- crb = self.host_dut
+ node = self.host_sut
if "expect" not in list(cmd.keys()):
expect = "# "
@@ -389,7 +389,7 @@ class VirtScene(object):
else:
timeout = cmd["timeout"]
- ret = crb.send_expect(
+ ret = node.send_expect(
cmd["command"], expect, timeout=timeout, verify=verify
)
@@ -420,7 +420,7 @@ class VirtScene(object):
for vm_name in list(self.vm_confs.keys()):
# tricky here, QEMUKvm based on suite and vm name
# suite is virt_global, vm_name just the type
- vm = QEMUKvm(self.host_dut, self.vm_type.upper(), "virt_global")
+ vm = QEMUKvm(self.host_sut, self.vm_type.upper(), "virt_global")
vm.load_config()
vm.vm_name = vm_name
vm.set_vm_default()
@@ -431,28 +431,28 @@ class VirtScene(object):
# get cpu topo
topo = self.get_cputopo(scene_params)
try:
- vm_dut = vm.start(
+ vm_sut = vm.start(
load_config=False, set_target=False, cpu_topo=topo
)
- if vm_dut is None:
+ if vm_sut is None:
raise Exception("Set up VM ENV failed!")
vm_info = {}
vm_info[vm_name] = vm
- vm_info[vm_name + "_session"] = vm_dut
+ vm_info[vm_name + "_session"] = vm_sut
self.vms.append(vm_info)
except Exception as e:
print(utils.RED("Failure for %s" % str(e)))
- def get_vm_duts(self):
- duts = []
+ def get_vm_suts(self):
+ sut_nodes = []
for vm_info in self.vms:
for vm_obj in list(vm_info.keys()):
if "session" in vm_obj:
- duts.append(vm_info[vm_obj])
+ sut_nodes.append(vm_info[vm_obj])
- return duts
+ return sut_nodes
def create_scene(self):
self.prepare_vm()
@@ -484,7 +484,7 @@ class VirtScene(object):
if __name__ == "__main__":
class QEMUKvmTmp:
- def __init__(self, dut, vm_name, suite_name):
+ def __init__(self, sut, vm_name, suite_name):
print(vm_name)
print(suite_name)
@@ -504,7 +504,7 @@ if __name__ == "__main__":
emu_dev3 = simple_dev("00:00.3")
emu_dev4 = simple_dev("00:00.4")
- class simple_dut(object):
+ class simple_sut(object):
def __init__(self):
self.ports_info = [
{"vfs_port": [emu_dev1, emu_dev2]},
@@ -527,8 +527,8 @@ if __name__ == "__main__":
def alloc_cpu(self, vm="", number=-1, socket=-1, corelist=None):
print("alloc %s num %d on socket %d" % (vm, number, socket))
- dut = simple_dut()
- scene = VirtScene(dut, None, "vf_passthrough")
+ sut = simple_sut()
+ scene = VirtScene(sut, None, "vf_passthrough")
scene.load_config()
scene.create_scene()
scene.destroy_scene()
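
A short sketch of the command dictionaries that reg_prevm_cmds()/reg_postvm_cmds() accept after the rename; the 'type' field now selects host/tg/vm instead of host/tester/vm, and the commands and interface name below are placeholders:

    # Pre-VM command, executed on the TG node before the VM is started.
    pre_cmd = {
        "type": "tg",                   # which node runs the command
        "command": "ifconfig ens1 up",  # command sent to the node
        "expect": "# ",                 # expected output/prompt
        "timeout": 60,
        "verify": "no",
    }

    # Post-VM command, executed inside the VM SUT once it is up.
    post_cmd = {
        "type": "vm",
        "command": "lspci | grep Ethernet",
        "expect": "# ",
    }

    print(pre_cmd["type"], post_cmd["type"])
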
diff --git a/framework/virt_dut.py b/framework/virt_sut.py
similarity index 83%
rename from framework/virt_dut.py
rename to framework/virt_sut.py
index b8b7ef29..74f100df 100644
--- a/framework/virt_dut.py
+++ b/framework/virt_sut.py
@@ -10,35 +10,35 @@ import framework.settings as settings
from nics.net_device import GetNicObj, RemoveNicObj
from .config import AppNameConf, PortConf
-from .dut import Dut
-from .project_dpdk import DPDKdut
+from .project_dpdk import DPDKSut
from .settings import LOG_NAME_SEP, NICS, get_netdev, load_global_setting
+from .sut_node import SutNode
from .utils import RED, parallel_lock
-class VirtDut(DPDKdut):
+class VirtSut(DPDKSut):
"""
- A connection to the CRB under test.
- This class sends commands to the CRB and validates the responses. It is
+ A connection to the Node under test.
+ This class sends commands to the Node and validates the responses. It is
implemented using either ssh for linuxapp or the terminal server for
baremetal.
- All operations are in fact delegated to an instance of either CRBLinuxApp
- or CRBBareMetal.
+ All operations are in fact delegated to an instance of either NodeLinuxApp
+ or NodeBareMetal.
"""
def __init__(
- self, hyper, crb, serializer, virttype, vm_name, suite, cpu_topo, dut_id
+ self, hyper, node, serializer, virttype, vm_name, suite, cpu_topo, sut_id
):
- self.vm_ip = crb["IP"]
- self.NAME = "virtdut" + LOG_NAME_SEP + "%s" % self.vm_ip
+ self.vm_ip = node["IP"]
+ self.NAME = "virtsut" + LOG_NAME_SEP + "%s" % self.vm_ip
# do not create addition alt_session
- super(VirtDut, self).__init__(
- crb, serializer, dut_id, self.NAME, alt_session=False
+ super(VirtSut, self).__init__(
+ node, serializer, sut_id, self.NAME, alt_session=False
)
self.vm_name = vm_name
self.hyper = hyper
- self.host_dut = hyper.host_dut
+ self.host_sut = hyper.host_sut
self.cpu_topo = cpu_topo
self.migration_vm = False
@@ -46,7 +46,7 @@ class VirtDut(DPDKdut):
self.suite = suite
self.number_of_cores = 0
- self.tester = None
+ self.tg_node = None
self.cores = []
self.architecture = None
self.ports_map = []
@@ -58,8 +58,8 @@ class VirtDut(DPDKdut):
self.apps_name = {}
def init_log(self):
- if hasattr(self.host_dut, "test_classname"):
- self.logger.config_suite(self.host_dut.test_classname, "virtdut")
+ if hasattr(self.host_sut, "test_classname"):
+ self.logger.config_suite(self.host_sut.test_classname, "virtsut")
def close(self, force=False):
if self.session:
@@ -69,10 +69,10 @@ class VirtDut(DPDKdut):
def set_nic_type(self, nic_type):
"""
- Set CRB NICS ready to validated.
+ Set Node NICs ready to be validated.
"""
self.nic_type = nic_type
- # vm_dut config will load from vm configuration file
+ # vm_sut config will be loaded from the vm configuration file
@parallel_lock()
def load_portconf(self):
@@ -84,18 +84,18 @@ class VirtDut(DPDKdut):
self.ports_cfg = self.conf.get_ports_config()
@parallel_lock()
- def detect_portmap(self, dut_id):
+ def detect_portmap(self, sut_id):
"""
Detect port mapping with ping6 message, should be locked for protect
- tester operations.
+ TG operations.
"""
- # enable tester port ipv6
- self.host_dut.enable_tester_ipv6()
+ # enable TG port ipv6
+ self.host_sut.enable_tg_ipv6()
self.map_available_ports()
- # disable tester port ipv6
- self.host_dut.disable_tester_ipv6()
+ # disable TG port ipv6
+ self.host_sut.disable_tg_ipv6()
def load_portmap(self):
"""
@@ -110,16 +110,16 @@ class VirtDut(DPDKdut):
continue
if "peer" in list(self.ports_cfg[key].keys()):
- tester_pci = self.ports_cfg[key]["peer"]
- # find tester_pci index
- pci_idx = self.tester.get_local_index(tester_pci)
+ tg_pci = self.ports_cfg[key]["peer"]
+ # find tg_pci index
+ pci_idx = self.tg_node.get_local_index(tg_pci)
self.ports_map[index] = pci_idx
def set_target(self, target, bind_dev=True, driver_name="", driver_mode=""):
"""
Set env variable, these have to be setup all the time. Some tests
need to compile example apps by themselves and will fail otherwise.
- Set hugepage on DUT and install modules required by DPDK.
+ Set hugepage on SUT and install modules required by DPDK.
Configure default ixgbe PMD function.
"""
self.set_toolchain(target)
@@ -142,9 +142,9 @@ class VirtDut(DPDKdut):
def prerequisites(self, pkgName, patch, autodetect_topo):
"""
Prerequest function should be called before execute any test case.
- Will call function to scan all lcore's information which on DUT.
+ Will call the function to scan all lcore information on the SUT.
Then call pci scan function to collect nic device information.
- At last setup DUT' environment for validation.
+ At last, set up the SUT's environment for validation.
"""
if not self.skip_setup:
self.prepare_package()
@@ -172,8 +172,8 @@ class VirtDut(DPDKdut):
# update with real numa id
self.update_ports()
- # restore dut ports to kernel
- # if current vm is migration vm, skip restore dut ports
+ # restore SUT ports to kernel
+ # if current vm is migration vm, skip restore SUT ports
# because there maybe have some app have run
if not self.migration_vm:
if self.virttype != "XEN":
@@ -194,7 +194,7 @@ class VirtDut(DPDKdut):
else:
# if no config ports in port config file, will auto-detect portmap
if autodetect_topo:
- self.detect_portmap(dut_id=self.dut_id)
+ self.detect_portmap(sut_id=self.sut_id)
# print latest ports_info
for port_info in self.ports_info:
@@ -205,7 +205,7 @@ class VirtDut(DPDKdut):
self.apps_name_conf = name_cfg.load_app_name_conf()
self.apps_name = self.apps_name_conf["meson"]
- # use the dut target directory instead of 'target' string in app name
+ # use the SUT target directory instead of 'target' string in app name
for app in self.apps_name:
cur_app_path = self.apps_name[app].replace("target", self.target)
self.apps_name[app] = cur_app_path + " "
@@ -327,7 +327,7 @@ class VirtDut(DPDKdut):
if vmpci == pci_map["guestpci"]:
hostpci = pci_map["hostpci"]
# search host port info structure
- for hostport in self.host_dut.ports_info:
+ for hostport in self.host_sut.ports_info:
# update port numa
if hostpci == hostport["pci"]:
port["numa"] = hostport["numa"]
@@ -346,7 +346,7 @@ class VirtDut(DPDKdut):
Load or generate network connection mapping list.
"""
self.map_available_ports_uncached()
- self.logger.warning("VM DUT PORT MAP: " + str(self.ports_map))
+ self.logger.warning("VM SUT PORT MAP: " + str(self.ports_map))
def map_available_ports_uncached(self):
"""
@@ -359,20 +359,20 @@ class VirtDut(DPDKdut):
remove = []
self.ports_map = [-1] * nrPorts
- hits = [False] * len(self.tester.ports_info)
+ hits = [False] * len(self.tg_node.ports_info)
for vmPort in range(nrPorts):
vmpci = self.ports_info[vmPort]["pci"]
peer = self.get_peer_pci(vmPort)
# if peer pci configured
if peer is not None:
- for remotePort in range(len(self.tester.ports_info)):
- if self.tester.ports_info[remotePort]["pci"] == peer:
+ for remotePort in range(len(self.tg_node.ports_info)):
+ if self.tg_node.ports_info[remotePort]["pci"] == peer:
hits[remotePort] = True
self.ports_map[vmPort] = remotePort
break
if self.ports_map[vmPort] == -1:
- self.logger.error("CONFIGURED TESTER PORT CANNOT FOUND!!!")
+ self.logger.error("CONFIGURED TG PORT CANNOT BE FOUND!!!")
else:
continue # skip ping6 map
@@ -384,20 +384,20 @@ class VirtDut(DPDKdut):
break
# auto ping port map
- for remotePort in range(len(self.tester.ports_info)):
- # for two vfs connected to same tester port
+ for remotePort in range(len(self.tg_node.ports_info)):
+ # for two vfs connected to same TG port
# need skip ping from devices on same pf device
- remotepci = self.tester.ports_info[remotePort]["pci"]
- port_type = self.tester.ports_info[remotePort]["type"]
+ remotepci = self.tg_node.ports_info[remotePort]["pci"]
+ port_type = self.tg_node.ports_info[remotePort]["type"]
# IXIA port should not check whether has vfs
if port_type.lower() not in ("ixia", "trex"):
- remoteport = self.tester.ports_info[remotePort]["port"]
+ remoteport = self.tg_node.ports_info[remotePort]["port"]
vfs = []
- # vm_dut and tester in same dut
- host_ip = self.crb["IP"].split(":")[0]
- if self.crb["tester IP"] == host_ip:
+ # vm_sut and TG in same SUT
+ host_ip = self.node["IP"].split(":")[0]
+ if self.node["tg IP"] == host_ip:
vfs = remoteport.get_sriov_vfs_pci()
- # if hostpci is vf of tester port
+ # if hostpci is vf of TG port
if hostpci == remotepci or hostpci in vfs:
print(RED("Skip ping from same PF device"))
continue
@@ -406,13 +406,13 @@ class VirtDut(DPDKdut):
if ipv6 == "Not connected":
continue
- out = self.tester.send_ping6(
+ out = self.tg_node.send_ping6(
remotePort, ipv6, self.get_mac_address(vmPort)
)
if out and "64 bytes from" in out:
self.logger.info(
- "PORT MAP: [dut %d: tester %d]" % (vmPort, remotePort)
+ "PORT MAP: [SUT %d: tg %d]" % (vmPort, remotePort)
)
self.ports_map[vmPort] = remotePort
hits[remotePort] = True
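
The peer-PCI branch of the port mapping renamed above can be summarized with this standalone sketch; the PCI addresses are illustrative and the ping6 auto-detection fallback is omitted:

    # Map each VM SUT port to the TG port whose PCI matches the configured peer.
    vm_ports = [{"pci": "0000:00:04.0", "peer": "0000:81:00.0"},
                {"pci": "0000:00:05.0", "peer": "0000:81:00.1"}]
    tg_ports = [{"pci": "0000:81:00.0"}, {"pci": "0000:81:00.1"}]

    ports_map = [-1] * len(vm_ports)
    for vm_idx, vm_port in enumerate(vm_ports):
        peer = vm_port.get("peer")
        for tg_idx, tg_port in enumerate(tg_ports):
            if tg_port["pci"] == peer:
                ports_map[vm_idx] = tg_idx   # SUT port vm_idx <-> TG port tg_idx
                break

    print("VM SUT PORT MAP:", ports_map)
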
diff --git a/main.py b/main.py
index a4845293..9de539ff 100755
--- a/main.py
+++ b/main.py
@@ -69,7 +69,7 @@ parser = argparse.ArgumentParser(description="DPDK test framework.")
parser.add_argument(
"--config-file",
default="execution.cfg",
- help="configuration file that describes the test " + "cases, DUTs and targets",
+ help="configuration file that describes the test " + "cases, SUTs and targets",
)
parser.add_argument("--git", help="git label to use as input")
@@ -90,15 +90,15 @@ parser.add_argument(
"-s",
"--skip-setup",
action="store_true",
- help="skips all possible setup steps done on both DUT" + " and tester boards.",
+ help="skips all possible setup steps done on both SUT" + " and TG boards.",
)
parser.add_argument(
"-r",
"--read-cache",
action="store_true",
- help="reads the DUT configuration from a cache. If not "
- + "specified, the DUT configuration will be calculated "
+ help="reads the SUT configuration from a cache. If not "
+ + "specified, the SUT configuration will be calculated "
+ "as usual and cached.",
)
@@ -157,8 +157,8 @@ parser.add_argument(
parser.add_argument(
"--commands",
action="append",
- help="run command on tester or dut. The command format is "
- + "[commands]:dut|tester:pre-init|post-init:check|ignore",
+ help="run command on TG or SUT. The command format is "
+ + "[commands]:sut|tg:pre-init|post-init:check|ignore",
)
parser.add_argument("--subtitle", help="add a subtitle to the rst report")
diff --git a/nics/net_device.py b/nics/net_device.py
index 85245d29..59d666d7 100644
--- a/nics/net_device.py
+++ b/nics/net_device.py
@@ -8,7 +8,7 @@ import time
from functools import wraps
import framework.settings as settings
-from framework.crb import Crb
+from framework.node import Node
from framework.settings import HEADER_SIZE, TIMEOUT
from framework.utils import RED
@@ -39,15 +39,15 @@ class NetDevice(object):
Abstract the device which is PF or VF.
"""
- def __init__(self, crb, domain_id, bus_id, devfun_id):
- if not isinstance(crb, Crb):
- raise Exception(" Please input the instance of Crb!!!")
- self.crb = crb
+ def __init__(self, node, domain_id, bus_id, devfun_id):
+ if not isinstance(node, Node):
+ raise Exception(" Please input an instance of Node!!!")
+ self.node = node
self.domain_id = domain_id
self.bus_id = bus_id
self.devfun_id = devfun_id
self.pci = domain_id + ":" + bus_id + ":" + devfun_id
- self.pci_id = get_pci_id(crb, domain_id, bus_id, devfun_id)
+ self.pci_id = get_pci_id(node, domain_id, bus_id, devfun_id)
self.default_driver = settings.get_nic_driver(self.pci_id)
self.name = settings.get_nic_name(self.pci_id)
@@ -74,9 +74,9 @@ class NetDevice(object):
def __send_expect(self, cmds, expected, timeout=TIMEOUT, alt_session=True):
"""
- Wrap the crb`s session as private session for sending expect.
+ Wrap the node's session as a private session for sending expect.
"""
- return self.crb.send_expect(
+ return self.node.send_expect(
cmds, expected, timeout=timeout, alt_session=alt_session
)
@@ -84,7 +84,7 @@ class NetDevice(object):
"""
Get OS type.
"""
- return self.crb.get_os_type()
+ return self.node.get_os_type()
def nic_is_pf(self):
"""
@@ -96,7 +96,7 @@ class NetDevice(object):
"""
Get the NIC driver.
"""
- return self.crb.get_pci_dev_driver(self.domain_id, self.bus_id, self.devfun_id)
+ return self.node.get_pci_dev_driver(self.domain_id, self.bus_id, self.devfun_id)
def get_nic_pkg(self):
"""
@@ -566,13 +566,13 @@ class NetDevice(object):
"""
Get numa number of specified pci device.
"""
- self.crb.get_device_numa(self.domain_id, self.bus_id, self.devfun_id)
+ self.node.get_device_numa(self.domain_id, self.bus_id, self.devfun_id)
def get_card_type(self):
"""
Get card type of specified pci device.
"""
- return self.crb.get_pci_dev_id(self.domain_id, self.bus_id, self.devfun_id)
+ return self.node.get_pci_dev_id(self.domain_id, self.bus_id, self.devfun_id)
@nic_has_driver
def get_nic_speed(self):
@@ -690,7 +690,7 @@ class NetDevice(object):
bus_id = addr_array[1]
devfun_id = addr_array[2]
- self.default_vf_driver = self.crb.get_pci_dev_driver(
+ self.default_vf_driver = self.node.get_pci_dev_driver(
domain_id, bus_id, devfun_id
)
else:
@@ -904,7 +904,7 @@ class NetDevice(object):
self.__send_expect(cmd % (self.intf_name, mtu), "# ")
-def get_pci_id(crb, domain_id, bus_id, devfun_id):
+def get_pci_id(node, domain_id, bus_id, devfun_id):
"""
Return pci device type
"""
@@ -913,14 +913,14 @@ def get_pci_id(crb, domain_id, bus_id, devfun_id):
bus_id,
devfun_id,
)
- out = crb.send_expect(command, "# ").strip()
+ out = node.send_expect(command, "# ").strip()
vendor = out[2:]
command = "cat /sys/bus/pci/devices/%s\:%s\:%s/device" % (
domain_id,
bus_id,
devfun_id,
)
- out = crb.send_expect(command, "# ").strip()
+ out = node.send_expect(command, "# ").strip()
device = out[2:]
return "%s:%s" % (vendor, device)
@@ -945,7 +945,7 @@ def get_from_list(host, domain_id, bus_id, devfun_id):
for nic in NICS_LIST:
if host == nic["host"]:
pci = ":".join((domain_id, bus_id, devfun_id))
- if pci == nic["pci"] and nic["port"].crb.session:
+ if pci == nic["pci"] and nic["port"].node.session:
return nic["port"]
return None
@@ -960,26 +960,26 @@ def remove_from_list(host):
NICS_LIST.remove(nic)
-def GetNicObj(crb, domain_id, bus_id, devfun_id):
+def GetNicObj(node, domain_id, bus_id, devfun_id):
"""
Get network device object. If network device has been initialized, just
return object.
"""
# find existed NetDevice object
- obj = get_from_list(crb.crb["My IP"], domain_id, bus_id, devfun_id)
+ obj = get_from_list(node.node["My IP"], domain_id, bus_id, devfun_id)
if obj:
return obj
# generate NetDevice object
- obj = NetDevice(crb, domain_id, bus_id, devfun_id)
+ obj = NetDevice(node, domain_id, bus_id, devfun_id)
# save NetDevice object to cache, directly get it from cache next time
- add_to_list(crb.crb["My IP"], obj)
+ add_to_list(node.node["My IP"], obj)
return obj
-def RemoveNicObj(crb):
+def RemoveNicObj(node):
"""
Remove network device object.
"""
- remove_from_list(crb.crb["My IP"])
+ remove_from_list(node.node["My IP"])
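
For reference, a simplified sketch of the NIC object cache behind GetNicObj()/RemoveNicObj(): objects are looked up by the node's "My IP" plus the PCI address, so repeated calls for the same device return the cached object. The helper below is an illustration only; the real lookup also checks that the cached port still has a live session:

    # Minimal cache keyed by (host ip, pci); the factory stands in for NetDevice().
    NICS_LIST = []

    def get_nic(host_ip, pci, factory):
        for nic in NICS_LIST:
            if nic["host"] == host_ip and nic["pci"] == pci:
                return nic["port"]            # cache hit
        port = factory()                      # would be NetDevice(node, ...)
        NICS_LIST.append({"host": host_ip, "pci": pci, "port": port})
        return port

    a = get_nic("10.0.0.1", "0000:81:00.0", lambda: object())
    b = get_nic("10.0.0.1", "0000:81:00.0", lambda: object())
    print(a is b)   # True: the second call comes from the cache
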
diff --git a/nics/system_info.py b/nics/system_info.py
index 3e80d4bd..9a1163d1 100644
--- a/nics/system_info.py
+++ b/nics/system_info.py
@@ -12,10 +12,10 @@ from git import Repo
class SystemInfo(object):
- def __init__(self, dut, pci_device_id):
- self.dut = dut
+ def __init__(self, sut_node, pci_device_id):
+ self.sut_node = sut_node
self.pci_device_id = pci_device_id
- self.session = self.dut.session
+ self.session = self.sut_node.session
self.system_info = OrderedDict()
self.nic_info = OrderedDict()
diff --git a/tools/dump_case.py b/tools/dump_case.py
index 257a896c..24dbbbf7 100755
--- a/tools/dump_case.py
+++ b/tools/dump_case.py
@@ -68,13 +68,13 @@ def get_performance_test_cases(test_suite):
return get_cases(test_suite, r"test_perf_")
-class simple_dut(object):
+class simple_sut(object):
def __init__(self):
self.ports_info = []
def load_cases():
- dut = simple_dut()
+ sut = simple_sut()
suite_func_list = {}
suite_perf_list = {}
for suite in suites:
@@ -83,7 +83,7 @@ def load_cases():
"tests." + _suite_full_name, fromlist=[_suite_full_name]
)
for classname, test_class in get_subclasses(test_module, TestCase):
- test_suite = test_class(dut, None, None, suite)
+ test_suite = test_class(sut, None, None, suite)
func_cases = get_functional_test_cases(test_suite)
perf_cases = get_performance_test_cases(test_suite)
suite_func_list[suite] = func_cases
diff --git a/tools/setup.py b/tools/setup.py
index cf768d0a..9684447a 100755
--- a/tools/setup.py
+++ b/tools/setup.py
@@ -23,7 +23,7 @@ DTS_EXECS = DTS_PATH + "/executions"
CONFIG_ROOT_PATH = os.environ.get("DTS_CFG_FOLDER") or os.path.join(DTS_PATH, "conf/")
DTS_EXEC_CFG = os.path.join(DTS_PATH, "execution.cfg")
-DTS_CRBS_CFG = os.path.join(CONFIG_ROOT_PATH, "crbs.cfg")
+DTS_TOPO_CFG = os.path.join(CONFIG_ROOT_PATH, "topology.cfg")
DTS_PORTS_CFG = os.path.join(CONFIG_ROOT_PATH, "ports.cfg")
DTS_IXIA_CFG = os.path.join(CONFIG_ROOT_PATH, "ixia.cfg")
@@ -31,12 +31,12 @@ sys.path.append(DTS_FRAMEWORK)
sys.path.append(DTS_TOOLS)
global def_opt
-global dut_ip
-global tester_ip
+global sut_ip
+global tg_ip
global os_type
-global dut_pass
-global tester_pass
-global dut_user
+global sut_pass
+global tg_pass
+global sut_user
global ixia
global channel
global bypass
@@ -44,8 +44,8 @@ global suites
global executions
def_opt = "0"
-dut_ip = None
-tester_ip = None
+sut_ip = None
+tg_ip = None
ixia = None
@@ -75,53 +75,53 @@ def scan_executions():
executions.append(file_name)
-def config_crbs():
- global dut_ip
- global tester_ip
+def config_nodes():
+ global sut_ip
+ global tg_ip
global os_type
- global dut_pass
- global tester_pass
- global dut_user
+ global sut_pass
+ global tg_pass
+ global sut_user
global ixia
global channel
global bypass
global perf_execution
print("============================================================")
- print("Setting DUT and Tester crb information")
+ print("Setting SUT and TG node information")
ip_option = {
- "prompt": "DUT IP address",
+ "prompt": "SUT IP address",
"type": "ip",
- "help": "Please input ip address of DUT crb",
+ "help": "Please input ip address of SUT node",
"default": "127.0.0.1",
}
opt = Option(**ip_option)
- dut_ip = opt.parse_input()
+ sut_ip = opt.parse_input()
ip_option = {
- "prompt": "Tester IP address",
+ "prompt": "TG IP address",
"type": "ip",
- "help": "Please input ip address of Tester crb",
- "default": dut_ip,
+ "help": "Please input ip address of TG node",
+ "default": sut_ip,
}
opt = Option(**ip_option)
- tester_ip = opt.parse_input()
+ tg_ip = opt.parse_input()
- dut_user = "root"
+ sut_user = "root"
passwd_option = {
- "prompt": "DUT root password",
+ "prompt": "SUT root password",
"type": "string",
- "help": "[INSECURE] Please input password of DUT crb (leave blank to use preconfigured SSH keys)",
+ "help": "[INSECURE] Please input password of SUT node (leave blank to use preconfigured SSH keys)",
"default": "",
}
opt = Option(**passwd_option)
- dut_pass = opt.parse_input()
+ sut_pass = opt.parse_input()
os_option = {
"prompt": "OS type",
"type": "choice",
- "help": "Please choose dut operation system type",
+ "help": "Please choose SUT operation system type",
"options": ["linux", "freebsd"],
"default": "0",
}
@@ -129,13 +129,13 @@ def config_crbs():
os_type = opt.parse_input()
passwd_option = {
- "prompt": "Tester root password",
+ "prompt": "TG root password",
"type": "string",
- "help": "[INSECURE] Please input password of Tester crb (leave blank to use preconfigured SSH keys)",
+ "help": "[INSECURE] Please input password of TG node (leave blank to use preconfigured SSH keys)",
"default": "",
}
opt = Option(**passwd_option)
- tester_pass = opt.parse_input()
+ tg_pass = opt.parse_input()
perf_option = {
"prompt": "Whether run performance execution",
@@ -169,32 +169,32 @@ def config_crbs():
bypass = opt.parse_input()
-def write_crbs_cfg():
+def write_nodes_cfg():
separator = "\n"
content = ""
- section = "[%s]" % dut_ip
+ section = "[%s]" % sut_ip
content += section
content += separator
- crb_conf = [
- ("dut_ip", dut_ip),
- ("dut_user", dut_user),
- ("dut_passwd", dut_pass),
+ node_conf = [
+ ("sut_ip", sut_ip),
+ ("sut_user", sut_user),
+ ("sut_passwd", sut_pass),
("os", os_type),
- ("tester_ip", tester_ip),
- ("tester_passwd", tester_pass),
+ ("tg_ip", tg_ip),
+ ("tg_passwd", tg_pass),
("ixia_group", ixia),
("channels", channel),
("bypass_core0", bypass),
]
- for conf in crb_conf:
+ for conf in node_conf:
key, value = conf
conf_str = "%s=%s" % (key, value)
content += conf_str
content += separator
- with open(DTS_CRBS_CFG, "w") as f:
+ with open(DTS_TOPO_CFG, "w") as f:
f.write(content)
@@ -234,8 +234,8 @@ def config_execution():
print("============================================================")
print("Setting execution plan")
- if not dut_ip:
- print(RED("Need to configure 'DUT&Tester crb' first!!!"))
+ if not sut_ip:
+ print(RED("Need to configure 'SUT&TG node' first!!!"))
return False
# default execution
driver_name = "igb_uio"
@@ -325,13 +325,13 @@ def write_exec_cfg():
separator = "\n"
content = ""
- section = "[%s]" % dut_ip
+ section = "[%s]" % sut_ip
content += section
content += separator
- crb_conf = [("crbs", dut_ip), ("drivername", driver_name)]
+ node_conf = [("nodes", sut_ip), ("drivername", driver_name)]
- for conf in crb_conf:
+ for conf in node_conf:
key, value = conf
conf_str = "%s=%s" % (key, value)
content += conf_str
@@ -372,7 +372,7 @@ def config_ixia():
print(
RED(
"Performance request configure IXIA group in "
- "'DUT&Tester crb' first!!!"
+ "'SUT&TG node' first!!!"
)
)
return False
@@ -439,35 +439,35 @@ def write_ixia_cfg():
def config_ports():
- global dut_ports
- dut_ports = []
+ global sut_ports
+ sut_ports = []
add_more = True
pci_regex = "([\da-f]{4}:[\da-f]{2}:[\da-f]{2}.\d{1})$"
ixia_regex = r"(\d).(\d)"
print("============================================================")
- print("Manually configure DUT port mapping")
- if not dut_ip:
- print(RED("Need to configuure 'DUT&Tester crb' first!!!"))
+ print("Manually configure SUT port mapping")
+ if not sut_ip:
+ print(RED("Need to configuure 'SUT&TG node' first!!!"))
return False
while add_more:
pci_option = {
- "prompt": "DUT port pci address",
+ "prompt": "SUT port pci address",
"type": "string",
- "help": "Please input DUT pci address xxxx:xx:xx.x",
+ "help": "Please input SUT pci address xxxx:xx:xx.x",
"default": "",
}
opt = Option(**pci_option)
- dut_addr = opt.parse_input()
- m = re.match(pci_regex, dut_addr)
+ sut_addr = opt.parse_input()
+ m = re.match(pci_regex, sut_addr)
if not m:
print(RED("Pci address should follow Domain+BDF format!!!"))
continue
if ixia and ixia != "":
pci_option = {
- "prompt": "Choose Tester IXIA port",
+ "prompt": "Choose TG IXIA port",
"type": "choice",
"options": ixia_ports,
"help": "Please choice IXIA port",
@@ -479,9 +479,9 @@ def config_ports():
test_addr = "IXIA%s.%s" % (card, port)
else:
pci_option = {
- "prompt": "Tester port pci address",
+ "prompt": "TG port pci address",
"type": "string",
- "help": "Please input tester pci address xxxx:xx:xx.x",
+ "help": "Please input tg pci address xxxx:xx:xx.x",
"default": "",
}
opt = Option(**pci_option)
@@ -491,12 +491,12 @@ def config_ports():
print(RED("Pci address should follow Domain+BDF format!!!"))
continue
- dut_port = {}
- dut_port[dut_addr] = test_addr
- dut_ports.append(dut_port)
+ sut_port = {}
+ sut_port[sut_addr] = test_addr
+ sut_ports.append(sut_port)
add_option = {
- "prompt": "Whether configure another dut port",
+ "prompt": "Whether configure another SUT port",
"type": "bool",
"help": 'If need more port input "Yes", otherwise ' + 'input "No"',
"default": "No",
@@ -514,14 +514,14 @@ def write_ports_cfg():
separator = "\n"
content = ""
- section = "[%s]" % dut_ip
+ section = "[%s]" % sut_ip
content += section
content += separator
content += "ports="
content += separator
- for port in dut_ports:
+ for port in sut_ports:
pci_addr = list(port.keys())[0]
test_addr = port[pci_addr]
content += " pci=%s,peer=%s;" % (pci_addr, test_addr)
@@ -606,7 +606,7 @@ def main():
"type": "choice",
"help": "Running DTS request preparation few " + "configurations",
"options": [
- "DUT&Tester crb",
+ "SUT&TG node",
"execution plan",
"ixia port for performance",
"port config for manually assign ports",
@@ -619,8 +619,8 @@ def main():
choice = opt.parse_input()
index = opt.choice
if index == 0:
- config_crbs()
- write_crbs_cfg()
+ config_nodes()
+ write_nodes_cfg()
elif index == 1:
if not config_execution():
continue
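
To make the crbs.cfg to topology.cfg rename easier to review, here is a sketch of the section that write_nodes_cfg() now emits; the IP addresses, passwords and remaining values are placeholders:

    # Build the [sut_ip] section with the renamed keys from the hunk above.
    sut_ip = "127.0.0.1"
    node_conf = [
        ("sut_ip", sut_ip),
        ("sut_user", "root"),
        ("sut_passwd", ""),
        ("os", "linux"),
        ("tg_ip", "127.0.0.1"),
        ("tg_passwd", ""),
        ("ixia_group", ""),
        ("channels", 4),
        ("bypass_core0", True),
    ]

    content = "[%s]\n" % sut_ip
    for key, value in node_conf:
        content += "%s=%s\n" % (key, value)

    # In setup.py this string is written to conf/topology.cfg.
    print(content)
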
--
2.17.1