[dts][PATCH V1] tests/*_cbdma: Optimize calling virtio_common API method
From: Dukai Yuan @ 2023-04-18 8:19 UTC
To: dts; +Cc: Dukai Yuan
tests/virtio_common.py has been optimized, so modify all the CBDMA-related test suites
to call the virtio_common APIs through the cbdma_common() and basic_common() classes.
Signed-off-by: Dukai Yuan <dukaix.yuan@intel.com>
---
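Reviewer note: the sketch below illustrates the migration pattern this series applies to every CBDMA suite — the per-suite get_cbdma_ports_info_and_bind_to_dpdk()/bind_cbdma_device_to_kernel() helpers and the check_2M_env property are replaced by the shared cbdma_common and basic_common classes from tests/virtio_common.py. The method names and parameters follow the calls visible in the hunks below; the suite skeleton itself (class name, test body) is a hypothetical, minimal example, not code from this patch.

    # Minimal sketch of the new usage pattern, assuming the API shown in the hunks below.
    from framework.test_case import TestCase
    from tests.virtio_common import basic_common as BC
    from tests.virtio_common import cbdma_common as CC

    class TestAnyCbdmaSuite(TestCase):          # hypothetical suite name
        def set_up_all(self):
            self.BC = BC(self)
            self.CC = CC(self)

        def test_example(self):
            # Bind N CBDMA devices on the NIC's NUMA socket and get their PCI addresses.
            cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
                cbdma_num=2, driver_name="vfio-pci", socket=self.ports_socket
            )
            dmas = "txq0@%s;rxq0@%s" % (cbdmas[0], cbdmas[1])
            eal_param = "--vdev 'net_vhost0,iface=vhost-net,queues=1,dmas=[%s]'" % dmas
            if self.BC.check_2M_hugepage_size():
                # 2M hugepages require single-file-segments on the virtio-user side.
                eal_param += " --single-file-segments"
            # ... launch testpmd with eal_param and run traffic as each case requires ...

        def tear_down_all(self):
            # Return all CBDMA devices to the kernel (ioatdma) driver.
            self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")

With this change, socket filtering is handled by the socket argument of bind_cbdma_to_dpdk_driver() instead of each suite re-implementing the bus-number heuristic that the removed helpers used.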
tests/TestSuite_dpdk_gro_lib_cbdma.py | 69 +-
..._loopback_virtio_user_server_mode_cbdma.py | 440 ++++---
tests/TestSuite_vhost_cbdma.py | 444 +++----
...stSuite_vhost_event_idx_interrupt_cbdma.py | 176 ++-
tests/TestSuite_vhost_user_interrupt_cbdma.py | 96 +-
...tSuite_vhost_virtio_pmd_interrupt_cbdma.py | 219 ++--
...Suite_vhost_virtio_user_interrupt_cbdma.py | 132 +-
...tSuite_virtio_event_idx_interrupt_cbdma.py | 215 ++--
.../TestSuite_vm2vm_virtio_net_perf_cbdma.py | 1078 +++++++----------
tests/TestSuite_vm2vm_virtio_pmd_cbdma.py | 263 ++--
tests/TestSuite_vm2vm_virtio_user_cbdma.py | 432 ++++---
..._pvp_multi_paths_performance_with_cbdma.py | 192 ++-
tests/TestSuite_vswitch_sample_cbdma.py | 339 +++---
tests/virtio_common.py | 96 +-
14 files changed, 1823 insertions(+), 2368 deletions(-)
diff --git a/tests/TestSuite_dpdk_gro_lib_cbdma.py b/tests/TestSuite_dpdk_gro_lib_cbdma.py
index 6d31403b..23a55686 100644
--- a/tests/TestSuite_dpdk_gro_lib_cbdma.py
+++ b/tests/TestSuite_dpdk_gro_lib_cbdma.py
@@ -10,6 +10,8 @@ import tests.vhost_peer_conf as peer
from framework.pmd_output import PmdOutput
from framework.test_case import TestCase
from framework.virt_common import VM
+from tests.virtio_common import basic_common as BC
+from tests.virtio_common import cbdma_common as CC
class TestDPDKGROLibCbdma(TestCase):
@@ -52,6 +54,8 @@ class TestDPDKGROLibCbdma(TestCase):
self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
self.vhost_user = self.dut.new_session(suite="vhost-user")
self.vhost_pmd = PmdOutput(self.dut, self.vhost_user)
+ self.BC = BC(self)
+ self.CC = CC(self)
def set_up(self):
"""
@@ -61,55 +65,6 @@ class TestDPDKGROLibCbdma(TestCase):
self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
- """
- get all cbdma ports
- """
- self.all_cbdma_list = []
- self.cbdma_list = []
- self.cbdma_str = ""
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if allow_diff_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- else:
- if self.ports_socket == cur_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- self.verify(
- len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
- )
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.cbdma_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str,
- "# ",
- 60,
- )
-
def set_testpmd_params(self):
self.vhost_user.send_expect("set fwd csum", "testpmd> ", 120)
self.vhost_user.send_expect("csum mac-swap off 0", "testpmd> ", 120)
@@ -216,22 +171,24 @@ class TestDPDKGROLibCbdma(TestCase):
Test Case1: DPDK GRO test with two queues and cbdma channels using tcp/ipv4 traffic
"""
self.config_kernel_nic_host()
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
"rxq0@%s;"
"rxq1@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
)
)
param = "--txd=1024 --rxd=1024 --txq=2 --rxq=2 --nb-cores=2"
eal_param = "--vdev 'net_vhost0,iface=vhost-net,queues=2,dmas=[%s]'" % dmas
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.pci)
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
@@ -291,12 +248,12 @@ class TestDPDKGROLibCbdma(TestCase):
"# ",
30,
)
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.dut.send_expect("ip netns del ns1", "# ", 30)
self.dut.send_expect("./usertools/dpdk-devbind.py -u %s" % (self.pci), "# ", 30)
self.dut.send_expect(
diff --git a/tests/TestSuite_loopback_virtio_user_server_mode_cbdma.py b/tests/TestSuite_loopback_virtio_user_server_mode_cbdma.py
index 22727049..503a71b8 100644
--- a/tests/TestSuite_loopback_virtio_user_server_mode_cbdma.py
+++ b/tests/TestSuite_loopback_virtio_user_server_mode_cbdma.py
@@ -7,6 +7,8 @@ import re
from framework.packet import Packet
from framework.pmd_output import PmdOutput
from framework.test_case import TestCase
+from tests.virtio_common import basic_common as BC
+from tests.virtio_common import cbdma_common as CC
class TestLoopbackVirtioUserServerModeCbama(TestCase):
@@ -32,6 +34,8 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
self.pdump_session = self.dut.new_session(suite="pdump")
self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user)
self.virtio_user_pmd = PmdOutput(self.dut, self.virtio_user)
+ self.CC = CC(self)
+ self.BC = BC(self)
def set_up(self):
"""
@@ -48,13 +52,6 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
]
self.result_table_create(self.table_header)
- @property
- def check_2M_env(self):
- out = self.dut.send_expect(
- "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# "
- )
- return True if out == "2048" else False
-
def send_6192_packets_from_vhost(self):
self.vhost_user_pmd.execute_cmd("set txpkts 64,64,64,2000,2000,2000")
self.vhost_user_pmd.execute_cmd("set burst 1")
@@ -163,7 +160,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
launch the testpmd as virtio with vhost_net0
"""
- if self.check_2M_env:
+ if self.BC.check_2M_hugepage_size():
eal_param += " --single-file-segments"
self.virtio_user_pmd.start_testpmd(
cores=cores,
@@ -179,55 +176,6 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
self.virtio_user_pmd.execute_cmd("set fwd rxonly")
self.virtio_user_pmd.execute_cmd("start")
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
- """
- get and bind cbdma ports into DPDK driver
- """
- self.all_cbdma_list = []
- self.cbdma_list = []
- self.cbdma_str = ""
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if allow_diff_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- else:
- if self.ports_socket == cur_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- self.verify(
- len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
- )
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.cbdma_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str,
- "# ",
- 60,
- )
-
def close_all_session(self):
"""
close session of vhost-user and virtio-user
@@ -241,7 +189,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 1: Loopback packed ring inorder mergeable path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -256,18 +206,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
)
vhost_eal_param = (
@@ -278,7 +228,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=1,in_order=1,packed_vq=1,server=1"
@@ -300,7 +250,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 2: Loopback packed ring mergeable path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -315,18 +267,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -337,7 +289,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=1,in_order=0,packed_vq=1,server=1"
@@ -359,7 +311,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 3: Loopback packed ring inorder non-mergeable path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -374,18 +328,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
)
)
vhost_eal_param = (
@@ -396,7 +350,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=0,in_order=1,packed_vq=1,server=1"
@@ -418,7 +372,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 4: Loopback packed ring non-mergeable path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -433,18 +389,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
)
vhost_eal_param = (
@@ -455,7 +411,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=0,in_order=0,packed_vq=1,server=1"
@@ -477,7 +433,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 5: Loopback packed ring vectorized path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -492,18 +450,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -514,7 +472,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--force-max-simd-bitwidth=512 --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=0,in_order=1,packed_vq=1,vectorized=1,server=1"
@@ -536,7 +494,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 6: Loopback packed ring vectorized path and ring size is not power of 2 multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -551,18 +511,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -573,7 +533,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--force-max-simd-bitwidth=512 --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=0,in_order=1,packed_vq=1,vectorized=1,queue_size=1025,server=1"
@@ -595,7 +555,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 7: Loopback split ring inorder mergeable path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -610,18 +572,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
)
vhost_eal_param = (
@@ -632,7 +594,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=1,in_order=1,server=1"
@@ -654,7 +616,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 8: Loopback split ring mergeable path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -669,18 +633,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -691,7 +655,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=1,in_order=0,server=1"
@@ -713,7 +677,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 9: Loopback split ring inorder non-mergeable path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -728,18 +694,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
)
)
vhost_eal_param = (
@@ -750,7 +716,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=0,in_order=1,server=1"
@@ -772,7 +738,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 10: Loopback split ring non-mergeable path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -787,18 +755,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -809,7 +777,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=0,in_order=0,server=1"
@@ -830,7 +798,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 11: Loopback split ring vectorized path multi-queues payload check with server mode and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -845,18 +815,18 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -867,7 +837,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=0,in_order=1,vectorized=1,server=1"
@@ -889,11 +859,13 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 12: Loopback split ring large chain packets stress test with server mode and cbdma enable
"""
- if not self.check_2M_env:
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ if not self.BC.check_2M_hugepage_size():
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, socket=self.ports_socket
+ )
dmas = "txq0@%s;" "rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'eth_vhost0,iface=vhost-net0,queues=1,client=1,dmas=[%s]'"
@@ -904,7 +876,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,vectorized=1,queue_size=2048"
@@ -924,11 +896,13 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 13: Loopback packed ring large chain packets stress test with server mode and cbdma enable
"""
- if not self.check_2M_env:
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ if not self.BC.check_2M_hugepage_size():
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, socket=self.ports_socket
+ )
dmas = "txq0@%s;" "rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'eth_vhost0,iface=vhost-net0,queues=1,client=1,dmas=[%s]'"
@@ -939,7 +913,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,vectorized=1,packed_vq=1,queue_size=2048"
@@ -957,7 +931,9 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
Test Case 14: PV split and packed ring test txonly mode with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -968,14 +944,14 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"txq6@%s;"
"txq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -986,7 +962,7 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
virtio_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=8,mrg_rxbuf=1,in_order=1,server=1"
@@ -1035,10 +1011,10 @@ class TestLoopbackVirtioUserServerModeCbama(TestCase):
"""
self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
self.dut.kill_all()
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.close_all_session()
diff --git a/tests/TestSuite_vhost_cbdma.py b/tests/TestSuite_vhost_cbdma.py
index ab2341f2..d0063302 100644
--- a/tests/TestSuite_vhost_cbdma.py
+++ b/tests/TestSuite_vhost_cbdma.py
@@ -13,6 +13,8 @@ from framework.pktgen import PacketGeneratorHelper
from framework.pmd_output import PmdOutput
from framework.settings import HEADER_SIZE, UPDATE_EXPECTED, load_global_setting
from framework.test_case import TestCase
+from tests.virtio_common import basic_common as BC
+from tests.virtio_common import cbdma_common as CC
SPLIT_RING_PATH = {
"inorder_mergeable": "mrg_rxbuf=1,in_order=1",
@@ -56,6 +58,8 @@ class TestVhostCbdma(TestCase):
self.testpmd_name = self.dut.apps_name["test-pmd"].split("/")[-1]
self.save_result_flag = True
self.json_obj = {}
+ self.CC = CC(self)
+ self.BC = BC(self)
def set_up(self):
"""
@@ -73,55 +77,6 @@ class TestVhostCbdma(TestCase):
# self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
self.mode_list = []
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
- """
- get and bind cbdma ports into DPDK driver
- """
- self.all_cbdma_list = []
- self.cbdma_list = []
- self.cbdma_str = ""
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if allow_diff_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- else:
- if self.ports_socket == cur_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- self.verify(
- len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
- )
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.cbdma_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str,
- "# ",
- 60,
- )
-
def get_vhost_port_num(self):
out = self.vhost_user.send_expect("show port summary all", "testpmd> ", 60)
port_num = re.search("Number of available ports:\s*(\d*)", out)
@@ -154,13 +109,6 @@ class TestVhostCbdma(TestCase):
)
self.vhost_user_pmd.execute_cmd("start")
- @property
- def check_2M_env(self):
- out = self.dut.send_expect(
- "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# "
- )
- return True if out == "2048" else False
-
def start_vhost_testpmd(
self, cores="Default", param="", eal_param="", ports="", iova_mode="va"
):
@@ -174,7 +122,7 @@ class TestVhostCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("start")
def start_virtio_testpmd(self, cores="Default", param="", eal_param=""):
- if self.check_2M_env:
+ if self.BC.check_2M_hugepage_size():
eal_param += " --single-file-segments"
self.virtio_user_pmd.start_testpmd(
cores=cores, param=param, eal_param=eal_param, no_pci=True, prefix="virtio"
@@ -188,17 +136,19 @@ class TestVhostCbdma(TestCase):
"""
Test Case 1: PVP split ring all path multi-queues vhost async operation with 1 to 1 mapping between vrings and CBDMA virtual channels
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
"rxq0@%s;"
"rxq1@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
)
vhost_eal_param = (
@@ -206,7 +156,7 @@ class TestVhostCbdma(TestCase):
)
vhost_param = "--nb-cores=1 --txq=2 --rxq=2 --txd=1024 --rxd=1024"
allow_pci = [self.dut.ports_info[0]["pci"]]
- for i in self.cbdma_list:
+ for i in cbdmas:
allow_pci.append(i)
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -242,7 +192,7 @@ class TestVhostCbdma(TestCase):
self.check_each_queue_of_port_packets(queues=2)
self.virtio_user_pmd.quit()
- if not self.check_2M_env:
+ if not self.BC.check_2M_hugepage_size():
self.vhost_user_pmd.quit()
vhost_eal_param += ",dma-ring-size=4096"
self.start_vhost_testpmd(
@@ -290,7 +240,9 @@ class TestVhostCbdma(TestCase):
"""
Test Case 2: PVP split ring all path multi-queues vhost async operations test with one CBDMA device being shared among multiple tx/rx queues
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -309,22 +261,22 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -332,7 +284,7 @@ class TestVhostCbdma(TestCase):
)
vhost_param = "--nb-cores=4 --txq=8 --rxq=8 --txd=1024 --rxd=1024"
allow_pci = [self.dut.ports_info[0]["pci"]]
- for i in self.cbdma_list:
+ for i in cbdmas:
allow_pci.append(i)
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -387,22 +339,22 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
)
)
vhost_eal_param = (
@@ -440,7 +392,7 @@ class TestVhostCbdma(TestCase):
self.check_each_queue_of_port_packets(queues=8)
self.virtio_user_pmd.quit()
- if not self.check_2M_env:
+ if not self.BC.check_2M_hugepage_size():
self.vhost_user_pmd.quit()
dmas = (
"txq0@%s;"
@@ -460,22 +412,22 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
)
vhost_eal_param = (
@@ -527,10 +479,12 @@ class TestVhostCbdma(TestCase):
"""
Test Case 3: PVP split ring dynamic queue number vhost async operations with cbdma
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = "txq0@%s;" "txq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[1],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=8,client=1,dmas=[%s],dma-ring-size=64'"
@@ -538,7 +492,7 @@ class TestVhostCbdma(TestCase):
)
vhost_param = "--nb-cores=2 --txq=2 --rxq=2 --txd=1024 --rxd=1024"
allow_pci = [self.dut.ports_info[0]["pci"]]
- for i in self.cbdma_list:
+ for i in cbdmas:
allow_pci.append(i)
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -586,10 +540,10 @@ class TestVhostCbdma(TestCase):
"rxq2@%s;"
"rxq3@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -625,19 +579,19 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -673,19 +627,19 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
)
vhost_eal_param = (
@@ -718,17 +672,19 @@ class TestVhostCbdma(TestCase):
"""
Test Case 4: PVP packed ring all path multi-queues vhost async operation test with each tx/rx queue using one CBDMA device
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
"rxq0@%s;"
"rxq1@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
)
vhost_eal_param = (
@@ -736,7 +692,7 @@ class TestVhostCbdma(TestCase):
)
vhost_param = "--nb-cores=1 --txq=2 --rxq=2 --txd=1024 --rxd=1024"
allow_pci = [self.dut.ports_info[0]["pci"]]
- for i in self.cbdma_list:
+ for i in cbdmas:
allow_pci.append(i)
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -773,7 +729,7 @@ class TestVhostCbdma(TestCase):
self.check_each_queue_of_port_packets(queues=2)
self.virtio_user_pmd.quit()
- if not self.check_2M_env:
+ if not self.BC.check_2M_hugepage_size():
self.vhost_user_pmd.quit()
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -821,7 +777,9 @@ class TestVhostCbdma(TestCase):
"""
Test Case 5: PVP packed ring all path multi-queues vhost async operations with M to 1 mapping between vrings and CBDMA virtual channels
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -840,22 +798,22 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
)
vhost_eal_param = (
@@ -863,7 +821,7 @@ class TestVhostCbdma(TestCase):
)
vhost_param = "--nb-cores=4 --txq=8 --rxq=8 --txd=1024 --rxd=1024"
allow_pci = [self.dut.ports_info[0]["pci"]]
- for i in self.cbdma_list:
+ for i in cbdmas:
allow_pci.append(i)
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -919,22 +877,22 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
)
)
vhost_eal_param = (
@@ -972,7 +930,7 @@ class TestVhostCbdma(TestCase):
self.check_each_queue_of_port_packets(queues=8)
self.virtio_user_pmd.quit()
- if not self.check_2M_env:
+ if not self.BC.check_2M_hugepage_size():
self.vhost_user_pmd.quit()
dmas = (
"txq0@%s;"
@@ -992,22 +950,22 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
)
vhost_eal_param = (
@@ -1059,10 +1017,12 @@ class TestVhostCbdma(TestCase):
"""
Test Case 6: PVP packed ring dynamic queue number vhost async operations with M to N mapping between vrings and CBDMA virtual channels
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = "txq0@%s;" "txq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[1],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=8,client=1,dmas=[%s],dma-ring-size=64'"
@@ -1071,7 +1031,7 @@ class TestVhostCbdma(TestCase):
vhost_param = "--nb-cores=2 --txq=2 --rxq=2 --txd=1024 --rxd=1024"
allow_pci = [self.dut.ports_info[0]["pci"]]
- for i in self.cbdma_list:
+ for i in cbdmas:
allow_pci.append(i)
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -1119,10 +1079,10 @@ class TestVhostCbdma(TestCase):
"rxq2@%s;"
"rxq3@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -1158,19 +1118,19 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_eal_param = (
@@ -1205,19 +1165,19 @@ class TestVhostCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
)
vhost_eal_param = (
@@ -1459,12 +1419,12 @@ class TestVhostCbdma(TestCase):
Run after each test case.
"""
self.dut.send_expect("killall -I %s" % self.testpmd_name, "#", 20)
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.dut.close_session(self.vhost_user)
self.dut.close_session(self.virtio_user)
self.dut.kill_all()
diff --git a/tests/TestSuite_vhost_event_idx_interrupt_cbdma.py b/tests/TestSuite_vhost_event_idx_interrupt_cbdma.py
index bf06f23a..3e7924e5 100644
--- a/tests/TestSuite_vhost_event_idx_interrupt_cbdma.py
+++ b/tests/TestSuite_vhost_event_idx_interrupt_cbdma.py
@@ -7,6 +7,7 @@ import time
from framework.test_case import TestCase
from framework.virt_common import VM
+from tests.virtio_common import cbdma_common as CC
class TestVhostEventIdxInterruptCbdma(TestCase):
@@ -27,6 +28,7 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
self.cbdma_dev_infos = []
self.device_str = None
+ self.CC = CC(self)
def set_up(self):
"""
@@ -57,14 +59,7 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
def list_split(self, items, n):
return [items[i : i + n] for i in range(0, len(items), n)]
- @property
- def check_2M_env(self):
- out = self.dut.send_expect(
- "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# "
- )
- return True if out == "2048" else False
-
- def launch_l3fwd_power(self):
+ def launch_l3fwd_power(self, cbdmas, dmas1, dmas2=None):
"""
launch l3fwd-power with a virtual vhost device
"""
@@ -91,28 +86,25 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
core_index = core_index + 1
# config the vdev info, if have 2 vms, it shoule have 2 vdev info
vdev_info = ""
- self.cbdma_dev_infos_list = []
- if self.vm_num >= 2:
- self.cbdma_dev_infos_list = self.list_split(
- self.cbdma_dev_infos, int(len(self.cbdma_dev_infos) / self.vm_num)
+ if self.vm_num == 1:
+ vdev_info = (
+ "--vdev 'net_vhost0,iface=%s/vhost-net0,dmas=[%s],queues=%d,client=1' "
+ % (self.base_dir, dmas1, self.queues)
)
- for i in range(self.vm_num):
- dmas = ""
- if self.vm_num == 1:
- for queue in range(self.queues):
- dmas += f"txq{queue}@{self.cbdma_dev_infos[queue]};"
-
- else:
- cbdma_dev_infos = self.cbdma_dev_infos_list[i]
- for index, q in enumerate(cbdma_dev_infos):
- dmas += f"txq{index}@{q};"
- vdev_info += (
- f"--vdev 'net_vhost%d,iface=%s/vhost-net%d,dmas=[{dmas}],queues=%d,client=1' "
- % (i, self.base_dir, i, self.queues)
+ else:
+ vdev_info = (
+ "--vdev 'net_vhost0,iface=%s/vhost-net0,dmas=[%s],queues=%d,client=1' "
+ "--vdev 'net_vhost1,iface=%s/vhost-net1,dmas=[%s],queues=%d,client=1' "
+ % (
+ self.base_dir,
+ dmas1,
+ self.queues,
+ self.base_dir,
+ dmas2,
+ self.queues,
+ )
)
-
port_info = "0x1" if self.vm_num == 1 else "0x3"
-
example_para = self.app_l3fwd_power_path + " "
para = (
" --log-level=9 %s -- -p %s --parse-ptype 1 --config '%s' --interrupt-only"
@@ -121,7 +113,7 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
eal_params = self.dut.create_eal_parameters(
cores=self.core_list_l3fwd,
no_pci=self.nopci,
- ports=self.used_cbdma,
+ ports=cbdmas,
)
command_line_client = example_para + eal_params + para
self.vhost.get_session_before(timeout=2)
@@ -135,7 +127,7 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
self.logger.info("Launch l3fwd-power sample finished")
self.verify(res is True, "Lanuch l3fwd failed")
- def relaunch_l3fwd_power(self):
+ def relaunch_l3fwd_power(self, cbdmas, dmas1, dmas2=None):
"""
relaunch l3fwd-power sample for port up
"""
@@ -146,7 +138,7 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
)
if pid:
self.dut.send_expect("kill -9 %s" % pid, "#")
- self.launch_l3fwd_power()
+ self.launch_l3fwd_power(cbdmas, dmas1, dmas2)
def set_vm_cpu_number(self, vm_config):
# config the vcpu numbers when queue number greater than 1
@@ -279,51 +271,6 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
session_info[sess_index].send_expect("^c", "#")
self.vm_dut[vm_index].close_session(session_info[sess_index])
- def get_cbdma_ports_info_and_bind_to_dpdk(self):
- """
- get all cbdma ports
- """
- self.cbdma_dev_infos = []
- self.used_cbdma = []
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- # dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which
- # on same socket with nic dev
- self.cbdma_dev_infos.append(pci_info.group(1))
- self.verify(
- len(self.cbdma_dev_infos) >= self.queues,
- "There no enough cbdma device to run this suite",
- )
- if self.queues == 1:
- self.cbdma_dev_infos = [self.cbdma_dev_infos[0], self.cbdma_dev_infos[-1]]
- self.used_cbdma = self.cbdma_dev_infos[0 : self.queues * self.vm_num]
- self.device_str = " ".join(self.used_cbdma)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.device_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- if self.device_str is not None:
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s"
- % self.device_str,
- "# ",
- 60,
- )
-
def stop_all_apps(self):
"""
close all vms
@@ -342,12 +289,30 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
self.queues = 16
self.get_core_mask()
self.nopci = False
- self.get_cbdma_ports_info_and_bind_to_dpdk()
- self.launch_l3fwd_power()
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas1 = (
+ "rxq0@%s;rxq1@%s;rxq2@%s;rxq3@%s;rxq4@%s;rxq5@%s;rxq6@%s;rxq7@%s;rxq8@%s;rxq9@%s;rxq10@%s"
+ % (
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ )
+ )
+ self.launch_l3fwd_power(cbdmas, dmas1)
self.start_vms(
vm_num=self.vm_num,
)
- self.relaunch_l3fwd_power()
+ self.relaunch_l3fwd_power(cbdmas, dmas1)
self.config_virito_net_in_vm()
self.send_and_verify()
self.stop_all_apps()
@@ -362,12 +327,20 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
self.queues = 1
self.get_core_mask()
self.nopci = False
- self.get_cbdma_ports_info_and_bind_to_dpdk()
- self.launch_l3fwd_power()
+ cbdmas0 = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=0
+ )
+ cbdmas1 = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=1
+ )
+ dmas1 = "rxq0@%s" % cbdmas0[0]
+ dmas2 = "rxq0@%s" % cbdmas1[0]
+ cbdmas = cbdmas0 + cbdmas1
+ self.launch_l3fwd_power(cbdmas, dmas1, dmas2)
self.start_vms(
vm_num=self.vm_num,
)
- self.relaunch_l3fwd_power()
+ self.relaunch_l3fwd_power(cbdmas, dmas1, dmas2)
self.config_virito_net_in_vm()
self.send_and_verify()
self.stop_all_apps()
@@ -382,10 +355,28 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
self.queues = 16
self.get_core_mask()
self.nopci = False
- self.get_cbdma_ports_info_and_bind_to_dpdk()
- self.launch_l3fwd_power()
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas1 = (
+ "rxq0@%s;rxq1@%s;rxq2@%s;rxq3@%s;rxq4@%s;rxq5@%s;rxq6@%s;rxq7@%s;rxq8@%s;rxq9@%s;rxq10@%s"
+ % (
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ )
+ )
+ self.launch_l3fwd_power(cbdmas, dmas1)
self.start_vms(vm_num=self.vm_num, packed=True)
- self.relaunch_l3fwd_power()
+ self.relaunch_l3fwd_power(cbdmas, dmas1)
self.config_virito_net_in_vm()
self.send_and_verify()
self.stop_all_apps()
@@ -400,10 +391,18 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
self.queues = 1
self.get_core_mask()
self.nopci = False
- self.get_cbdma_ports_info_and_bind_to_dpdk()
- self.launch_l3fwd_power()
+ cbdmas0 = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=0
+ )
+ cbdmas1 = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=1
+ )
+ dmas1 = "rxq0@%s" % cbdmas0[0]
+ dmas2 = "rxq0@%s" % cbdmas1[0]
+ cbdmas = cbdmas0 + cbdmas1
+ self.launch_l3fwd_power(cbdmas, dmas1, dmas2)
self.start_vms(vm_num=self.vm_num, packed=True)
- self.relaunch_l3fwd_power()
+ self.relaunch_l3fwd_power(cbdmas, dmas1, dmas2)
self.config_virito_net_in_vm()
self.send_and_verify()
self.stop_all_apps()
@@ -415,10 +414,9 @@ class TestVhostEventIdxInterruptCbdma(TestCase):
self.dut.close_session(self.vhost)
self.dut.send_expect(f"killall {self.l3fwdpower_name}", "#")
self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
- pass
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
diff --git a/tests/TestSuite_vhost_user_interrupt_cbdma.py b/tests/TestSuite_vhost_user_interrupt_cbdma.py
index ce95e94b..23c8c12a 100644
--- a/tests/TestSuite_vhost_user_interrupt_cbdma.py
+++ b/tests/TestSuite_vhost_user_interrupt_cbdma.py
@@ -12,6 +12,8 @@ import time
import framework.utils as utils
from framework.test_case import TestCase
+from tests.virtio_common import basic_common as BC
+from tests.virtio_common import cbdma_common as CC
class TestVhostUserInterruptCbdma(TestCase):
@@ -36,6 +38,8 @@ class TestVhostUserInterruptCbdma(TestCase):
self.cbdma_dev_infos = []
self.dmas_info = None
self.device_str = None
+ self.BC = BC(self)
+ self.CC = CC(self)
def set_up(self):
"""
@@ -81,7 +85,7 @@ class TestVhostUserInterruptCbdma(TestCase):
cores=self.core_list_virtio, prefix="virtio", no_pci=True, vdevs=[vdev]
)
- if self.check_2M_env:
+ if self.BC.check_2M_hugepage_size():
eal_params += " --single-file-segments"
para = " -- -i --rxq=%d --txq=%d --rss-ip" % (self.queues, self.queues)
command_line_client = self.app_testpmd_path + " " + eal_params + para
@@ -89,72 +93,6 @@ class TestVhostUserInterruptCbdma(TestCase):
command_line_client, "waiting for client connection...", 120
)
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num):
- """
- get all cbdma ports
- """
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which
- # on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if self.ports_socket == cur_socket:
- self.cbdma_dev_infos.append(pci_info.group(1))
- self.verify(
- len(self.cbdma_dev_infos) >= cbdma_num,
- "There no enough cbdma device to run this suite",
- )
- used_cbdma = self.cbdma_dev_infos[0:cbdma_num]
- tx_dmas_info = ""
- for dmas in used_cbdma:
- number = used_cbdma.index(dmas)
- dmas = "txq{}@{};".format(number, dmas)
- tx_dmas_info += dmas
- rx_dmas_info = ""
- for dmas in used_cbdma:
- number = used_cbdma.index(dmas)
- dmas = "rxq{}@{};".format(number, dmas)
- rx_dmas_info += dmas
- dmas_info = tx_dmas_info + rx_dmas_info
- self.dmas_info = dmas_info[:-1]
- self.device_str = " ".join(used_cbdma)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.device_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- if self.device_str is not None:
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s"
- % self.device_str,
- "# ",
- 60,
- )
-
- @property
- def check_2M_env(self):
- out = self.dut.send_expect(
- "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# "
- )
- return True if out == "2048" else False
-
def lanuch_l3fwd_power(self):
"""
launch l3fwd-power with a virtual vhost device
@@ -171,14 +109,29 @@ class TestVhostUserInterruptCbdma(TestCase):
example_cmd = self.app_l3fwd_power_path + " "
example_cmd += " --log-level=9 "
- self.get_cbdma_ports_info_and_bind_to_dpdk(4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas_info = (
+ "txq0@%s;txq1@%s;txq2@%s;txq3@%s;rxq0@%s;rxq1@%s;rxq2@%s;rxq3@%s"
+ % (
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ )
+ )
vdev = "'net_vhost0,iface=vhost-net,queues=%d,client=1,dmas=[%s]'" % (
self.queues,
- self.dmas_info,
+ dmas_info,
)
eal_params = self.dut.create_eal_parameters(
cores=self.core_list_l3fwd,
- ports=self.cbdma_dev_infos[0:4],
+ ports=cbdmas,
vdevs=[vdev],
)
para = " -- -p 0x1 --parse-ptype 1 --config '%s' --interrupt-only" % config_info
@@ -266,10 +219,9 @@ class TestVhostUserInterruptCbdma(TestCase):
self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#")
self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
self.dut.kill_all()
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
- pass
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
diff --git a/tests/TestSuite_vhost_virtio_pmd_interrupt_cbdma.py b/tests/TestSuite_vhost_virtio_pmd_interrupt_cbdma.py
index 211e68ca..02700933 100644
--- a/tests/TestSuite_vhost_virtio_pmd_interrupt_cbdma.py
+++ b/tests/TestSuite_vhost_virtio_pmd_interrupt_cbdma.py
@@ -16,6 +16,7 @@ from framework.pktgen import PacketGeneratorHelper
from framework.pmd_output import PmdOutput
from framework.test_case import TestCase
from framework.virt_common import VM
+from tests.virtio_common import cbdma_common as CC
class TestVhostVirtioPmdInterruptCbdma(TestCase):
@@ -53,6 +54,7 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase):
self.vhost_user = self.dut.new_session(suite="vhost-user")
self.vhost_pmd = PmdOutput(self.dut, self.vhost_user)
self.vm_dut = None
+ self.CC = CC(self)
def set_up(self):
"""
@@ -237,55 +239,6 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase):
self.check_related_cores_status_in_l3fwd(out, "waked up", fix_ip=True)
self.check_related_cores_status_in_l3fwd(out, "sleeps", fix_ip=True)
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
- """
- get all cbdma ports
- """
- self.all_cbdma_list = []
- self.cbdma_list = []
- self.cbdma_str = ""
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of DMA dev, only add the device which on same socket with NIC dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if allow_diff_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- else:
- if self.ports_socket == cur_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- self.verify(
- len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
- )
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.cbdma_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str,
- "# ",
- 60,
- )
-
def stop_all_apps(self):
"""
close all vms
@@ -299,9 +252,9 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase):
def test_perf_virtio95_interrupt_test_with_16_queues_and_cbdma_enable(self):
"""
- Test Case1: Basic virtio0.95 interrupt test with 16 queues and cbdma enable
+ Test Case 1: Basic virtio0.95 interrupt test with 16 queues and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -336,45 +289,45 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase):
"rxq14@%s;"
"rxq15@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
)
)
vhost_param = "--nb-cores=16 --rxq=16 --txq=16 --rss-ip"
vhost_eal_param = (
"--vdev 'eth_vhost0,iface=vhost-net,queues=16,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
@@ -393,9 +346,11 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase):
def test_perf_virtio10_interrupt_test_with_4_queues_and_cbdma_enable(self):
"""
- Test Case2: Basic virtio-1.0 interrupt test with 4 queues and cbdma enable
+ Test Case 2: Basic virtio-1.0 interrupt test with 4 queues and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -406,21 +361,21 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase):
"rxq2@%s;"
"rxq3@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
vhost_param = "--nb-cores=4 --rxq=4 --txq=4 --rss-ip"
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net,queues=4,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
@@ -441,9 +396,11 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase):
self,
):
"""
- Test Case3: Packed ring virtio interrupt test with 16 queues and cbdma enable
+ Test Case 3: Packed ring virtio interrupt test with 16 queues and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -478,45 +435,45 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase):
"rxq14@%s;"
"rxq15@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
)
)
vhost_param = "--nb-cores=16 --rxq=16 --txq=16 --rss-ip"
vhost_eal_param = (
"--vdev 'eth_vhost0,iface=vhost-net,queues=16,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
@@ -541,10 +498,10 @@ class TestVhostVirtioPmdInterruptCbdma(TestCase):
self.dut.kill_all()
self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.dut.close_session(self.vhost_user)
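[Editor's note] The long dmas literals in this suite all share the same "txqN@PCI;rxqN@PCI" layout, only the queue-to-device grouping differs between test cases. A standalone sketch of how such a string expands, using placeholder PCI addresses and a simple round-robin grouping rather than any particular case above:

    # Standalone illustration of the dmas string layout used in these suites.
    # The PCI addresses below are placeholders, not real devices.
    cbdmas = ["0000:00:04.%d" % i for i in range(4)]

    dmas = ";".join(
        ["txq%d@%s" % (q, cbdmas[q % len(cbdmas)]) for q in range(16)]
        + ["rxq%d@%s" % (q, cbdmas[q % len(cbdmas)]) for q in range(16)]
    )
    # txq0@0000:00:04.0;txq1@0000:00:04.1;...;rxq15@0000:00:04.3
    print(dmas)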
diff --git a/tests/TestSuite_vhost_virtio_user_interrupt_cbdma.py b/tests/TestSuite_vhost_virtio_user_interrupt_cbdma.py
index 19a45589..c4531a4c 100644
--- a/tests/TestSuite_vhost_virtio_user_interrupt_cbdma.py
+++ b/tests/TestSuite_vhost_virtio_user_interrupt_cbdma.py
@@ -14,6 +14,8 @@ import framework.packet as packet
import framework.utils as utils
from framework.pmd_output import PmdOutput
from framework.test_case import TestCase
+from tests.virtio_common import basic_common as BC
+from tests.virtio_common import cbdma_common as CC
class TestVirtioUserInterruptCbdma(TestCase):
@@ -53,6 +55,8 @@ class TestVirtioUserInterruptCbdma(TestCase):
self.virtio_user = self.dut.new_session(suite="virtio-user")
self.virtio_pmd = PmdOutput(self.dut, self.virtio_user)
self.l3fwd = self.dut.new_session(suite="l3fwd")
+ self.BC = BC(self)
+ self.CC = CC(self)
def set_up(self):
"""
@@ -71,13 +75,6 @@ class TestVirtioUserInterruptCbdma(TestCase):
out = self.dut.build_dpdk_apps("./examples/l3fwd-power")
self.verify("Error" not in out, "compilation l3fwd-power error")
- @property
- def check_2M_env(self):
- out = self.dut.send_expect(
- "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# "
- )
- return True if out == "2048" else False
-
def launch_l3fwd(self, path, packed=False, multi_queue=False):
if multi_queue:
queues = 2
@@ -91,7 +88,7 @@ class TestVirtioUserInterruptCbdma(TestCase):
eal_params = self.dut.create_eal_parameters(
cores=self.l3fwd_core_list, prefix="l3fwd-pwd", no_pci=True, vdevs=[vdev]
)
- if self.check_2M_env:
+ if self.BC.check_2M_hugepage_size():
eal_params += " --single-file-segments"
if not multi_queue:
config = " --config='(0,0,%s)'" % self.l3fwd_core_list[0]
@@ -134,55 +131,6 @@ class TestVirtioUserInterruptCbdma(TestCase):
else:
self.logger.error("Wrong link status not right, status is %s" % result)
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
- """
- get all cbdma ports
- """
- self.all_cbdma_list = []
- self.cbdma_list = []
- self.cbdma_str = ""
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if allow_diff_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- else:
- if self.ports_socket == cur_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- self.verify(
- len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
- )
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.cbdma_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str,
- "# ",
- 60,
- )
-
def send_packet(self, multi_queue=False):
pkt_count = 100
pkt = packet.Packet()
@@ -207,16 +155,18 @@ class TestVirtioUserInterruptCbdma(TestCase):
"""
Test Case1: Split ring LSC event between vhost-user and virtio-user with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = "txq0@%s;" "rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_param = ""
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net,queues=1,client=0,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
ports=ports,
@@ -249,16 +199,18 @@ class TestVirtioUserInterruptCbdma(TestCase):
"""
Test Case2: Split ring virtio-user interrupt test with vhost-user as backend and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = "txq0@%s;" "rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_param = "--rxq=1 --txq=1"
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net,queues=1,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.logger.info(ports)
self.vhost_pmd.start_testpmd(
@@ -281,16 +233,18 @@ class TestVirtioUserInterruptCbdma(TestCase):
"""
Test Case3: Packed ring LSC event between vhost-user and virtio-user with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = "txq0@%s;" "rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_param = "--tx-offloads=0x00"
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net,queues=1,client=0,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
ports=ports,
@@ -323,16 +277,18 @@ class TestVirtioUserInterruptCbdma(TestCase):
"""
Test Case4: Packed ring virtio-user interrupt test with vhost-user as backend and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = "txq0@%s;" "rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_param = "--rxq=1 --txq=1"
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net,queues=1,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
@@ -354,24 +310,26 @@ class TestVirtioUserInterruptCbdma(TestCase):
"""
Test Case 5: Split ring multi-queues virtio-user interrupt test with vhost-user as backend and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"rxq0@%s;"
"txq1@%s;"
"rxq1@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
)
vhost_param = "--rxq=2 --txq=2"
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net,queues=2,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.logger.info(ports)
self.vhost_pmd.start_testpmd(
@@ -394,24 +352,26 @@ class TestVirtioUserInterruptCbdma(TestCase):
"""
Test Case 6: Packed ring multi-queues virtio-user interrupt test with vhost-user as backend and cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"rxq0@%s;"
"txq1@%s;"
"rxq1@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
)
vhost_param = "--rxq=2 --txq=2"
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net,queues=2,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.logger.info(ports)
self.vhost_pmd.start_testpmd(
@@ -434,10 +394,10 @@ class TestVirtioUserInterruptCbdma(TestCase):
"""
self.dut.send_expect("killall %s" % self.l3fwdpower_name, "#")
self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.close_all_session()
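[Editor's note] The per-suite check_2M_env property removed above is replaced by BC.check_2M_hugepage_size(). Judging from the deleted code, the shared helper presumably reads the default hugepage size from /proc/meminfo; a sketch under that assumption (the test_case attribute name is assumed, not taken from virtio_common):

    def check_2M_hugepage_size(self):
        # Returns True when the default hugepage size is 2 MB (2048 kB),
        # in which case virtio-user/testpmd needs --single-file-segments.
        out = self.test_case.dut.send_expect(
            "cat /proc/meminfo | grep Hugepagesize | awk '{print($2)}'", "# "
        )
        return out == "2048"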
diff --git a/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py b/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py
index 697cfd97..e6e54081 100644
--- a/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py
+++ b/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py
@@ -10,6 +10,8 @@ from framework.pktgen import PacketGeneratorHelper
from framework.pmd_output import PmdOutput
from framework.test_case import TestCase
from framework.virt_common import VM
+from tests.virtio_common import basic_common as BC
+from tests.virtio_common import cbdma_common as CC
class TestVirtioIdxInterruptCbdma(TestCase):
@@ -40,6 +42,8 @@ class TestVirtioIdxInterruptCbdma(TestCase):
self.testpmd_name = self.app_testpmd_path.split("/")[-1]
self.vhost_user = self.dut.new_session(suite="vhost-user")
self.vhost_pmd = PmdOutput(self.dut, self.vhost_user)
+ self.CC = CC(self)
+ self.BC = BC(self)
def set_up(self):
"""
@@ -59,55 +63,6 @@ class TestVirtioIdxInterruptCbdma(TestCase):
)
self.core_list = self.dut.get_core_list(self.core_config)
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
- """
- get all cbdma ports
- """
- self.all_cbdma_list = []
- self.cbdma_list = []
- self.cbdma_str = ""
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if allow_diff_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- else:
- if self.ports_socket == cur_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- self.verify(
- len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
- )
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.cbdma_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str,
- "# ",
- 60,
- )
-
def start_vms(self, packed=False, mode=False, set_target=False, bind_dev=False):
"""
start qemus
@@ -254,14 +209,16 @@ class TestVirtioIdxInterruptCbdma(TestCase):
"""
Test Case1: Split ring virtio-pci driver reload test with CBDMA enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = "txq0@%s;rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_param = "--nb-cores=1 --txd=1024 --rxd=1024"
vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=1,dmas=[%s]'" % dmas
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
@@ -284,7 +241,9 @@ class TestVirtioIdxInterruptCbdma(TestCase):
"""
Test Case2: Split ring 16 queues virtio-net event idx interrupt mode test with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -319,45 +278,45 @@ class TestVirtioIdxInterruptCbdma(TestCase):
"rxq14@%s;"
"rxq15@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
)
)
vhost_param = "--nb-cores=16 --txd=1024 --rxd=1024 --rxq=16 --txq=16"
vhost_eal_param = (
"--vdev 'net_vhost,iface=vhost-net,queues=16,client=1,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
@@ -381,14 +340,16 @@ class TestVirtioIdxInterruptCbdma(TestCase):
"""
Test Case3: Packed ring virtio-pci driver reload test with CBDMA enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = "txq0@%s;rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_param = "--nb-cores=1 --txd=1024 --rxd=1024"
vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=1,dmas=[%s]'" % dmas
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
@@ -411,7 +372,9 @@ class TestVirtioIdxInterruptCbdma(TestCase):
"""
Test Case4: Packed ring 16 queues virtio-net event idx interrupt mode test with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas = (
"txq0@%s;"
"txq1@%s;"
@@ -446,45 +409,45 @@ class TestVirtioIdxInterruptCbdma(TestCase):
"rxq14@%s;"
"rxq15@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[3],
)
)
vhost_param = "--nb-cores=16 --txd=1024 --rxd=1024 --rxq=16 --txq=16"
vhost_eal_param = (
"--vdev 'net_vhost,iface=vhost-net,queues=16,client=1,dmas=[%s]'" % dmas
)
- ports = self.cbdma_list
+ ports = cbdmas
ports.append(self.dut.ports_info[0]["pci"])
self.vhost_pmd.start_testpmd(
cores=self.vhost_core_list,
@@ -510,10 +473,10 @@ class TestVirtioIdxInterruptCbdma(TestCase):
"""
self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.dut.close_session(self.vhost_user)
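[Editor's note] In the next file the per-suite iperf sequence moves behind BC.run_iperf_test_between_2_vms() and BC.check_iperf_result_between_2_vms(). Based on the get_perf_result code being deleted there, the result check amounts to pulling the last throughput figure out of the client log; a sketch of just that parsing step:

    import re

    def last_iperf_rate(log_text):
        # Drop the "Server Report" section so only client-side samples remain,
        # then return the final [M|G]bits/sec figure (the run average).
        idx = log_text.find("Server Report")
        if idx != -1:
            log_text = log_text[:idx]
        rates = re.compile(r"\S*\s*[MG]bits/sec").findall(log_text)
        return rates[-1] if rates else None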
diff --git a/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py b/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py
index b19c8636..8fece79f 100644
--- a/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py
+++ b/tests/TestSuite_vm2vm_virtio_net_perf_cbdma.py
@@ -11,6 +11,8 @@ import framework.utils as utils
from framework.pmd_output import PmdOutput
from framework.test_case import TestCase
from framework.virt_common import VM
+from tests.virtio_common import basic_common as BC
+from tests.virtio_common import cbdma_common as CC
class TestVM2VMVirtioNetPerfCbdma(TestCase):
@@ -31,6 +33,8 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
self.vhost = self.dut.new_session(suite="vhost")
self.pmdout_vhost_user = PmdOutput(self.dut, self.vhost)
self.app_testpmd_path = self.dut.apps_name["test-pmd"]
+ self.BC = BC(self)
+ self.CC = CC(self)
def set_up(self):
"""
@@ -40,64 +44,6 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
self.vm_dut = []
self.vm = []
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
- """
- get and bind cbdma ports into DPDK driver
- """
- self.all_cbdma_list = []
- self.cbdma_list = []
- self.cbdma_str = ""
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if allow_diff_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- else:
- if self.ports_socket == cur_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- self.verify(
- len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
- )
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.cbdma_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- if self.cbdma_str:
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s"
- % self.cbdma_str,
- "# ",
- 60,
- )
-
- @property
- def check_2M_env(self):
- out = self.dut.send_expect(
- "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# "
- )
- return True if out == "2048" else False
-
def start_vhost_testpmd(
self, cores, param="", eal_param="", ports="", iova_mode=""
):
@@ -135,86 +81,6 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
self.vm_dut.append(vm_dut)
self.vm.append(vm_info)
- def config_vm_ip(self):
- """
- set virtio device IP and run arp protocal
- """
- vm1_intf = self.vm_dut[0].ports_info[0]["intf"]
- vm2_intf = self.vm_dut[1].ports_info[0]["intf"]
- self.vm_dut[0].send_expect(
- "ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10
- )
- self.vm_dut[1].send_expect(
- "ifconfig %s %s" % (vm2_intf, self.virtio_ip2), "#", 10
- )
- self.vm_dut[0].send_expect(
- "arp -s %s %s" % (self.virtio_ip2, self.virtio_mac2), "#", 10
- )
- self.vm_dut[1].send_expect(
- "arp -s %s %s" % (self.virtio_ip1, self.virtio_mac1), "#", 10
- )
-
- def config_vm_combined(self, combined=1):
- """
- set virtio device combined
- """
- vm1_intf = self.vm_dut[0].ports_info[0]["intf"]
- vm2_intf = self.vm_dut[1].ports_info[0]["intf"]
- self.vm_dut[0].send_expect(
- "ethtool -L %s combined %d" % (vm1_intf, combined), "#", 10
- )
- self.vm_dut[1].send_expect(
- "ethtool -L %s combined %d" % (vm2_intf, combined), "#", 10
- )
-
- def check_ping_between_vms(self):
- ping_out = self.vm_dut[0].send_expect(
- "ping {} -c 4".format(self.virtio_ip2), "#", 20
- )
- self.logger.info(ping_out)
-
- def start_iperf(self):
- """
- run perf command between to vms
- """
- self.vhost.send_expect("clear port xstats all", "testpmd> ", 10)
-
- server = "iperf -s -i 1"
- client = "iperf -c {} -i 1 -t 60".format(self.virtio_ip1)
- self.vm_dut[0].send_expect("{} > iperf_server.log &".format(server), "", 10)
- self.vm_dut[1].send_expect("{} > iperf_client.log &".format(client), "", 10)
- time.sleep(60)
-
- def get_perf_result(self):
- """
- get the iperf test result
- """
- self.table_header = ["Mode", "[M|G]bits/sec"]
- self.result_table_create(self.table_header)
- self.vm_dut[0].send_expect("pkill iperf", "# ")
- self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir)
- fp = open("./iperf_client.log")
- fmsg = fp.read()
- fp.close()
- # remove the server report info from msg
- index = fmsg.find("Server Report")
- if index != -1:
- fmsg = fmsg[:index]
- iperfdata = re.compile("\S*\s*[M|G]bits/sec").findall(fmsg)
- # the last data of iperf is the ave data from 0-30 sec
- self.verify(len(iperfdata) != 0, "The iperf data between to vms is 0")
- self.logger.info("The iperf data between vms is %s" % iperfdata[-1])
-
- # put the result to table
- results_row = ["vm2vm", iperfdata[-1]]
- self.result_table_add(results_row)
-
- # print iperf resut
- self.result_table_print()
- # rm the iperf log file in vm
- self.vm_dut[0].send_expect("rm iperf_server.log", "#", 10)
- self.vm_dut[1].send_expect("rm iperf_client.log", "#", 10)
-
def verify_xstats_info_on_vhost(self):
"""
check both 2VMs can receive and send big packets to each other
@@ -233,39 +99,15 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
int(tx_info.group(1)) > 0, "Port 0 not forward packet greater than 1518"
)
- def check_scp_file_valid_between_vms(self, file_size=1024):
- """
- scp file form VM1 to VM2, check the data is valid
- """
- # default file_size=1024K
- data = ""
- for char in range(file_size * 1024):
- data += random.choice(self.random_string)
- self.vm_dut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ")
- # scp this file to vm1
- out = self.vm_dut[1].send_command(
- "scp root@%s:/tmp/payload /root" % self.virtio_ip1, timeout=10
- )
- if "Are you sure you want to continue connecting" in out:
- self.vm_dut[1].send_command("yes", timeout=10)
- self.vm_dut[1].send_command(self.vm[0].password, timeout=10)
- # get the file info in vm1, and check it valid
- md5_send = self.vm_dut[0].send_expect("md5sum /tmp/payload", "# ")
- md5_revd = self.vm_dut[1].send_expect("md5sum /root/payload", "# ")
- md5_send = md5_send[: md5_send.find(" ")]
- md5_revd = md5_revd[: md5_revd.find(" ")]
- self.verify(
- md5_send == md5_revd, "the received file is different with send file"
- )
-
def test_vm2vm_virtio_net_split_ring_cbdma_enable_test_with_tcp_traffic(self):
"""
Test Case 1: VM2VM virtio-net split ring CBDMA enable test with tcp traffic
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
-
- dmas1 = "txq0@%s;rxq0@%s" % (self.cbdma_list[0], self.cbdma_list[0])
- dmas2 = "txq0@%s;rxq0@%s" % (self.cbdma_list[1], self.cbdma_list[1])
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas1 = "txq0@%s;rxq0@%s" % (cbdmas[0], cbdmas[0])
+ dmas2 = "txq0@%s;rxq0@%s" % (cbdmas[1], cbdmas[1])
eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=1,tso=1,dmas=[%s]' "
"--vdev 'net_vhost1,iface=vhost-net1,queues=1,tso=1,dmas=[%s]'"
@@ -275,17 +117,17 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
)
self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on"
self.start_vms(server_mode=False, vm_queue=1)
- self.config_vm_ip()
- self.check_ping_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.check_ping_between_2_vms()
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
self.verify_xstats_info_on_vhost()
def test_vm2vm_virtio_net_split_ring_mergeable_8_queues_cbdma_enable_test_with_large_packet_payload_valid_check(
@@ -294,7 +136,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"""
Test Case 2: VM2VM virtio-net split ring mergeable 8 queues CBDMA enable test with large packet payload valid check
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -313,22 +155,22 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[7],
)
)
dmas2 = (
@@ -349,22 +191,22 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[7],
)
)
eal_param = (
@@ -375,19 +217,19 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list[0:8],
+ ports=cbdmas[0:8],
eal_param=eal_param,
param=param,
iova_mode="va",
)
self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
self.start_vms(server_mode=True, vm_queue=8)
- self.config_vm_ip()
- self.config_vm_combined(combined=8)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.config_2_vms_combined(combined=8)
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms()
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
self.pmdout_vhost_user.execute_cmd("quit", "#")
dmas1 = (
@@ -399,13 +241,13 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"txq5@%s;"
"txq6@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
)
dmas2 = (
@@ -417,13 +259,13 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"txq6@%s;"
"txq7@%s"
% (
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
)
eal_param = (
@@ -434,17 +276,17 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list[0:2],
+ ports=cbdmas[0:2],
eal_param=eal_param,
param=param,
iova_mode="va",
)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms()
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
- if not self.check_2M_env:
+ if not self.BC.check_2M_hugepage_size():
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -454,13 +296,13 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"txq5@%s;"
"txq6@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
)
)
dmas2 = (
@@ -472,13 +314,13 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq5@%s;"
"rxq6@%s"
% (
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
)
)
eal_param = (
@@ -490,15 +332,15 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
self.pmdout_vhost_user.execute_cmd("quit", "#")
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list[0:8],
+ ports=cbdmas[0:8],
eal_param=eal_param,
param=param,
iova_mode="pa",
)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms()
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
self.pmdout_vhost_user.execute_cmd("quit", "#")
eal_param = (
@@ -508,15 +350,15 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=4 --rxq=4"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
)
- self.config_vm_combined(combined=4)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_combined(combined=4)
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms()
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
self.pmdout_vhost_user.execute_cmd("quit", "#")
@@ -527,14 +369,14 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=1 --txq=1"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
)
- self.config_vm_combined(combined=1)
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_combined(combined=1)
+ self.BC.check_scp_file_between_2_vms()
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
def test_vm2vm_virtio_net_split_ring_with_non_mergeable_8_queues_cbdma_enable_test_with_large_packet_payload_valid_check(
self,
@@ -542,7 +384,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"""
Test Case 3: VM2VM virtio-net split ring non-mergeable 8 queues CBDMA enable test with large packet payload valid check
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -561,22 +403,22 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
)
dmas2 = (
@@ -597,22 +439,22 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
)
)
eal_param = (
@@ -623,19 +465,19 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
)
self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
self.start_vms(server_mode=True, vm_queue=8)
- self.config_vm_ip()
- self.config_vm_combined(combined=8)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.config_2_vms_combined(combined=8)
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
self.pmdout_vhost_user.execute_cmd("quit", "#")
dmas1 = (
@@ -646,12 +488,12 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"txq4@%s;"
"txq5@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
)
)
@@ -663,12 +505,12 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
)
)
eal_param = (
@@ -679,16 +521,16 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
)
for _ in range(5):
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
self.pmdout_vhost_user.execute_cmd("quit", "#")
eal_param = (
@@ -698,16 +540,16 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
)
- self.config_vm_combined(combined=8)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_combined(combined=8)
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
self.pmdout_vhost_user.execute_cmd("quit", "#")
eal_param = (
@@ -717,15 +559,15 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=1 --rxq=1"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
)
- self.config_vm_combined(combined=1)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_combined(combined=1)
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms()
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
def test_vm2vm_virtio_net_split_ring_mergeable_16_queues_cbdma_enable_test_with_Rx_Tx_csum_in_SW(
self,
@@ -733,7 +575,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"""
Test Case 4: VM2VM virtio-net split ring mergeable 16 queues CBDMA enable test with Rx/Tx csum in SW
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -768,38 +610,38 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq14@%s;"
"rxq15@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[7],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[7],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[7],
)
)
@@ -837,38 +679,38 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq14@%s;"
"rxq15@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
)
)
eal_param = (
@@ -879,7 +721,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=8 --txd=1024 --rxd=1024 --txq=16 --rxq=16"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
@@ -896,20 +738,22 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=off,guest_ecn=on,guest_ufo=on,host_ufo=on"
self.start_vms(server_mode=True, vm_queue=16)
- self.config_vm_ip()
- self.config_vm_combined(combined=16)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.config_2_vms_combined(combined=16)
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
def test_vm2vm_virtio_net_packed_ring_cbdma_enable_test_with_tcp_traffic(self):
"""
Test Case 5: VM2VM virtio-net packed ring CBDMA enable test with tcp traffic
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
- dmas1 = "txq0@%s;rxq0@%s" % (self.cbdma_list[0], self.cbdma_list[0])
- dmas2 = "txq0@%s;rxq0@%s" % (self.cbdma_list[1], self.cbdma_list[1])
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas1 = "txq0@%s;rxq0@%s" % (cbdmas[0], cbdmas[0])
+ dmas2 = "txq0@%s;rxq0@%s" % (cbdmas[1], cbdmas[1])
eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=1,tso=1,dmas=[%s]' "
"--vdev 'net_vhost1,iface=vhost-net1,queues=1,tso=1,dmas=[%s]'"
@@ -917,17 +761,17 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=2 --txd=1024 --rxd=1024 --txq=1 --rxq=1"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
)
self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
self.start_vms(server_mode=False, vm_queue=1)
- self.config_vm_ip()
- self.check_ping_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.check_ping_between_2_vms()
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
self.verify_xstats_info_on_vhost()
def test_vm2vm_virtio_net_packed_ring_mergeable_8_queues_cbdma_enable_test_with_large_packet_payload_valid_check(
@@ -936,7 +780,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"""
Test Case 6: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable test with large packet payload valid check
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -955,22 +799,22 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[7],
)
)
dmas2 = (
@@ -991,22 +835,22 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[7],
)
)
eal_param = (
@@ -1017,24 +861,24 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
)
self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
self.start_vms(server_mode=False, vm_queue=8)
- self.config_vm_ip()
- self.config_vm_combined(combined=8)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.config_2_vms_combined(combined=8)
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
for _ in range(5):
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
def test_vm2vm_virtio_net_packed_ring_non_mergeable_8_queues_cbdma_enable_test_with_large_packet_payload_valid_check(
self,
@@ -1042,7 +886,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"""
Test Case 7: VM2VM virtio-net packed ring non-mergeable 8 queues CBDMA enable test with large packet payload valid check
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -1057,18 +901,18 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[5],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[5],
)
)
dmas2 = (
@@ -1085,18 +929,18 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq4@%s;"
"rxq5@%s"
% (
- self.cbdma_list[8],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[13],
+ cbdmas[8],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[13],
)
)
eal_param = (
@@ -1107,24 +951,24 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
)
self.vm_args = "disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
self.start_vms(server_mode=False, vm_queue=8)
- self.config_vm_ip()
- self.config_vm_combined(combined=8)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.config_2_vms_combined(combined=8)
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
for _ in range(5):
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
def test_vm2vm_virtio_net_packed_ring_mergeable_16_queues_cbdma_enable_test_with_Rx_Tx_csum_in_SW(
self,
@@ -1132,7 +976,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"""
Test Case 8: VM2VM virtio-net packed ring mergeable 16 queues CBDMA enabled test with Rx/Tx csum in SW
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -1167,38 +1011,38 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq14@%s;"
"rxq15@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[7],
- self.cbdma_list[8],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[14],
- self.cbdma_list[15],
- self.cbdma_list[15],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[7],
+ cbdmas[8],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[14],
+ cbdmas[15],
+ cbdmas[15],
)
)
@@ -1236,38 +1080,38 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq14@%s;"
"rxq15@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
)
)
eal_param = (
@@ -1278,7 +1122,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=8 --txd=1024 --rxd=1024 --txq=16 --rxq=16"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
@@ -1296,17 +1140,17 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=off,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
self.start_vms(server_mode=False, vm_queue=16)
- self.config_vm_ip()
- self.config_vm_combined(combined=16)
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.config_2_vms_combined(combined=16)
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
for _ in range(5):
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
def test_vm2vm_virtio_net_packed_ring_cbdma_enable_test_dma_ring_size_with_tcp_traffic(
self,
@@ -1314,9 +1158,11 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"""
Test Case 9: VM2VM virtio-net packed ring CBDMA enable test dma-ring-size with tcp traffic
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(2)
- dmas1 = "txq0@%s;rxq0@%s" % (self.cbdma_list[0], self.cbdma_list[0])
- dmas2 = "txq0@%s;rxq0@%s" % (self.cbdma_list[1], self.cbdma_list[1])
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas1 = "txq0@%s;rxq0@%s" % (cbdmas[0], cbdmas[0])
+ dmas2 = "txq0@%s;rxq0@%s" % (cbdmas[1], cbdmas[1])
eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=1,tso=1,dmas=[%s],dma-ring-size=256' "
"--vdev 'net_vhost1,iface=vhost-net1,queues=1,tso=1,dmas=[%s],dma-ring-size=256'"
@@ -1325,23 +1171,23 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=2 --txd=1024 --rxd=1024 --txq=1 --rxq=1"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
)
self.vm_args = "disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on"
self.start_vms(server_mode=False, vm_queue=1)
- self.config_vm_ip()
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
for _ in range(5):
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
def test_vm2vm_virtio_net_packed_ring_8_queues_cbdma_enable_test_with_legacy_mode(
self,
@@ -1349,7 +1195,7 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"""
Test Case 10: VM2VM virtio-net packed ring 8 queues CBDMA enable test with legacy mode
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -1364,18 +1210,18 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[5],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[5],
)
)
dmas2 = (
@@ -1392,18 +1238,18 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"rxq4@%s;"
"rxq5@%s"
% (
- self.cbdma_list[8],
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[13],
+ cbdmas[8],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[13],
)
)
eal_param = (
@@ -1414,23 +1260,23 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --txq=8 --rxq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
eal_param=eal_param,
param=param,
iova_mode="va",
)
self.vm_args = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
self.start_vms(server_mode=False, vm_queue=8)
- self.config_vm_ip()
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
for _ in range(5):
- self.check_ping_between_vms()
- self.check_scp_file_valid_between_vms()
- self.start_iperf()
- self.get_perf_result()
+ self.BC.check_ping_between_2_vms()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ self.BC.check_iperf_result_between_2_vms()
def stop_all_apps(self):
for i in range(len(self.vm)):
@@ -1443,10 +1289,10 @@ class TestVM2VMVirtioNetPerfCbdma(TestCase):
"""
self.stop_all_apps()
self.dut.kill_all()
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.dut.close_session(self.vhost)
diff --git a/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py b/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py
index 7f378937..0507e9ca 100644
--- a/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py
+++ b/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py
@@ -9,6 +9,7 @@ import framework.utils as utils
from framework.pmd_output import PmdOutput
from framework.test_case import TestCase
from framework.virt_common import VM
+from tests.virtio_common import cbdma_common as CC
class TestVM2VMVirtioPmdCBDMA(TestCase):
@@ -24,6 +25,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
self.testpmd_name = self.app_testpmd_path.split("/")[-1]
self.vhost_user = self.dut.new_session(suite="vhost")
self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user)
+ self.CC = CC(self)
def set_up(self):
"""
@@ -45,44 +47,6 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
self.vm_dut = []
self.vm = []
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
- """
- get all cbdma ports
- """
- self.all_cbdma_list = []
- self.cbdma_list = []
- self.cbdma_str = ""
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if allow_diff_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- else:
- if self.ports_socket == cur_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- self.verify(
- len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
- )
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.cbdma_str),
- "# ",
- 60,
- )
-
def start_vhost_testpmd(self, cores, ports, prefix, eal_param, param):
"""
launch the testpmd with different parameters
@@ -219,7 +183,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
Test Case 1: VM2VM virtio-pmd split ring mergeable path dynamic queue size with cbdma enable and server mode
"""
self.check_path = ["virtio_dev_rx_async", "virtio_dev_tx_async"]
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -230,14 +194,14 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq2@%s;"
"rxq3@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
)
)
dmas2 = (
@@ -250,14 +214,14 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq2@%s;"
"rxq3@%s"
% (
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
)
)
eal_param = (
@@ -269,7 +233,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
prefix="vhost",
eal_param=eal_param,
param=param,
@@ -309,18 +273,18 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
)
dmas2 = (
@@ -337,18 +301,18 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
)
)
eal_param = (
@@ -359,7 +323,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
prefix="vhost",
eal_param=eal_param,
param=param,
@@ -376,7 +340,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
Test Case 2: VM2VM virtio-pmd split ring non-mergeable path dynamic queue size with cbdma enable and server mode
"""
self.check_path = ["virtio_dev_rx_async", "virtio_dev_tx_async"]
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -387,14 +351,14 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq2@%s;"
"rxq3@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
)
)
dmas2 = (
@@ -407,14 +371,14 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq2@%s;"
"rxq3@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
)
)
eal_param = (
@@ -425,7 +389,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=4 --txq=4"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
prefix="vhost",
eal_param=eal_param,
param=param,
@@ -455,7 +419,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
Test Case 3: VM2VM virtio-pmd packed ring mergeable path dynamic queue size with cbdma enable and server mode
"""
self.check_path = ["virtio_dev_rx_async", "virtio_dev_tx_async"]
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -466,14 +430,14 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq2@%s;"
"rxq3@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
)
)
dmas2 = (
@@ -486,14 +450,14 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq2@%s;"
"rxq3@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
)
)
eal_param = (
@@ -504,7 +468,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=4 --txq=4"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
prefix="vhost",
eal_param=eal_param,
param=param,
@@ -549,7 +513,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
Test Case 4: VM2VM virtio-pmd packed ring non-mergeable path dynamic queue size with cbdma enable and server mode
"""
self.check_path = ["virtio_dev_rx_async", "virtio_dev_tx_async"]
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(cbdma_num=16, driver_name="vfio-pci")
dmas1 = (
"txq0@%s;"
"txq1@%s;"
@@ -564,18 +528,18 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
)
dmas2 = (
@@ -592,18 +556,18 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
"rxq6@%s;"
"rxq7@%s"
% (
- self.cbdma_list[8],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[9],
- self.cbdma_list[10],
- self.cbdma_list[11],
- self.cbdma_list[12],
- self.cbdma_list[13],
- self.cbdma_list[14],
- self.cbdma_list[15],
+ cbdmas[8],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[9],
+ cbdmas[10],
+ cbdmas[11],
+ cbdmas[12],
+ cbdmas[13],
+ cbdmas[14],
+ cbdmas[15],
)
)
eal_param = (
@@ -614,7 +578,7 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
param = " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
self.start_vhost_testpmd(
cores=self.vhost_core_list,
- ports=self.cbdma_list,
+ ports=cbdmas,
prefix="vhost",
eal_param=eal_param,
param=param,
@@ -644,24 +608,13 @@ class TestVM2VMVirtioPmdCBDMA(TestCase):
self.vm[i].stop()
self.vhost_user.send_expect("quit", "#", 30)
- def bind_cbdma_device_to_kernel(self):
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s" % self.cbdma_str,
- "# ",
- 60,
- )
-
def tear_down(self):
"""
Run after each test case.
"""
self.stop_all_apps()
self.dut.kill_all()
- self.bind_cbdma_device_to_kernel()
+ self.CC.bind_cbdma_to_kernel_driver()
def tear_down_all(self):
"""
diff --git a/tests/TestSuite_vm2vm_virtio_user_cbdma.py b/tests/TestSuite_vm2vm_virtio_user_cbdma.py
index ec0b4a08..db995049 100644
--- a/tests/TestSuite_vm2vm_virtio_user_cbdma.py
+++ b/tests/TestSuite_vm2vm_virtio_user_cbdma.py
@@ -7,6 +7,7 @@ import re
from framework.packet import Packet
from framework.pmd_output import PmdOutput
from framework.test_case import TestCase
+from tests.virtio_common import cbdma_common as CC
class TestVM2VMVirtioUserCbdma(TestCase):
@@ -33,6 +34,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.testpmd_name = self.path.split("/")[-1]
self.app_pdump = self.dut.apps_name["pdump"]
self.pdump_name = self.app_pdump.split("/")[-1]
+ self.CC = CC(self)
def set_up(self):
"""
@@ -50,7 +52,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
get and bind cbdma ports into DPDK driver
"""
self.all_cbdma_list = []
- self.cbdma_list = []
+ cbdmas = []
self.cbdma_str = ""
out = self.dut.send_expect(
"./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
@@ -74,8 +76,8 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.verify(
len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
)
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
+ cbdmas = self.all_cbdma_list[0:cbdma_num]
+ self.cbdma_str = " ".join(cbdmas)
self.dut.send_expect(
"./usertools/dpdk-devbind.py --force --bind=%s %s"
% (self.drivername, self.cbdma_str),
@@ -319,16 +321,18 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 1: VM2VM split ring non-mergeable path multi-queues payload check with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas1 = "txq0@%s;txq1@%s;rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
dmas2 = "txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -342,7 +346,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:1],
+ ports=cbdmas[0:1],
iova_mode="va",
)
virtio1_eal_param = "--vdev=net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,mrg_rxbuf=0,in_order=0,queue_size=4096"
@@ -374,16 +378,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("quit", "#", 60)
self.clear_virtio_user1_stats()
dmas1 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
)
dmas2 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -397,7 +401,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_pdump_to_capture_pkt()
@@ -413,16 +417,18 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 2: VM2VM split ring inorder non-mergeable path multi-queues payload check with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas1 = "txq0@%s;txq1@%s;rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
dmas2 = "txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -436,7 +442,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:1],
+ ports=cbdmas[0:1],
iova_mode="va",
)
@@ -465,16 +471,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("quit", "#", 60)
self.clear_virtio_user1_stats()
dmas1 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[1],
)
dmas2 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[2],
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[3],
+ cbdmas[2],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[3],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -488,7 +494,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_pdump_to_capture_pkt()
@@ -504,16 +510,18 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 3: VM2VM split ring vectorized path multi-queues payload check with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas1 = "txq0@%s;txq1@%s;rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
dmas2 = "txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -527,7 +535,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:1],
+ ports=cbdmas[0:1],
iova_mode="va",
)
@@ -556,16 +564,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("quit", "#", 60)
self.clear_virtio_user1_stats()
dmas1 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
dmas2 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -579,7 +587,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_pdump_to_capture_pkt()
@@ -595,16 +603,18 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 4: VM2VM split ring inorder mergeable path test non-indirect descriptor with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas1 = "txq0@%s;txq1@%s;rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
dmas2 = "txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -616,7 +626,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:2],
+ ports=cbdmas[0:2],
iova_mode="va",
)
@@ -646,16 +656,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.virtio_user1_pmd.execute_cmd("quit", "#", 60)
self.virtio_user0_pmd.execute_cmd("quit", "#", 60)
dmas1 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
dmas2 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -667,7 +677,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_virtio_testpmd_with_vhost_net1(
@@ -692,16 +702,18 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 5: VM2VM split ring mergeable path test indirect descriptor with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas1 = "txq0@%s;txq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
dmas2 = "txq0@%s;rxq1@%s;rxq0@%s" % (
- self.cbdma_list[1],
- self.cbdma_list[1],
- self.cbdma_list[1],
+ cbdmas[1],
+ cbdmas[1],
+ cbdmas[1],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -713,7 +725,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:2],
+ ports=cbdmas[0:2],
iova_mode="va",
)
@@ -743,16 +755,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.virtio_user1_pmd.execute_cmd("quit", "#", 60)
self.virtio_user0_pmd.execute_cmd("quit", "#", 60)
dmas1 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
dmas2 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -764,7 +776,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_virtio_testpmd_with_vhost_net1(
@@ -790,18 +802,20 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 6: VM2VM packed ring non-mergeable path multi-queues payload check with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas1 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[1],
)
dmas2 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[1],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -815,7 +829,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
@@ -848,14 +862,14 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("quit", "#", 60)
self.clear_virtio_user1_stats()
dmas1 = "txq0@%s;txq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[1],
)
dmas2 = "txq0@%s;rxq1@%s;rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -869,7 +883,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_pdump_to_capture_pkt()
@@ -885,16 +899,18 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 7: VM2VM packed ring mergeable path multi-queues payload check with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas1 = "txq0@%s;txq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
dmas2 = "txq0@%s;rxq1@%s;rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -908,7 +924,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:1],
+ ports=cbdmas[0:1],
iova_mode="va",
)
@@ -937,16 +953,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("quit", "#", 60)
self.clear_virtio_user1_stats()
dmas1 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
dmas2 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[4],
- self.cbdma_list[5],
- self.cbdma_list[6],
- self.cbdma_list[7],
+ cbdmas[4],
+ cbdmas[5],
+ cbdmas[6],
+ cbdmas[7],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -960,7 +976,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_pdump_to_capture_pkt()
@@ -976,16 +992,18 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 8: VM2VM packed ring inorder mergeable path multi-queues payload check with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas1 = "txq0@%s;txq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
dmas2 = "txq0@%s;rxq1@%s;rxq0@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -999,7 +1017,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:1],
+ ports=cbdmas[0:1],
iova_mode="va",
)
@@ -1028,16 +1046,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("quit", "#", 60)
self.clear_virtio_user1_stats()
dmas1 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
dmas2 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -1051,7 +1069,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_pdump_to_capture_pkt()
@@ -1067,9 +1085,11 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 9: VM2VM packed ring inorder non-mergeable path multi-queues payload check with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
- dmas1 = "txq0@%s;txq0@%s" % (self.cbdma_list[0], self.cbdma_list[1])
- dmas2 = "txq1@%s;rxq1@%s" % (self.cbdma_list[0], self.cbdma_list[1])
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas1 = "txq0@%s;txq0@%s" % (cbdmas[0], cbdmas[1])
+ dmas2 = "txq1@%s;rxq1@%s" % (cbdmas[0], cbdmas[1])
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
"--vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[%s]'"
@@ -1082,7 +1102,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:2],
+ ports=cbdmas[0:2],
iova_mode="va",
)
@@ -1111,16 +1131,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("quit", "#", 60)
self.clear_virtio_user1_stats()
dmas1 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
dmas2 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -1134,7 +1154,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_pdump_to_capture_pkt()
@@ -1150,9 +1170,11 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 10: VM2VM packed ring vectorized-rx path multi-queues payload check with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
- dmas1 = "txq0@%s;txq0@%s" % (self.cbdma_list[0], self.cbdma_list[0])
- dmas2 = "txq1@%s;rxq1@%s" % (self.cbdma_list[0], self.cbdma_list[0])
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas1 = "txq0@%s;txq0@%s" % (cbdmas[0], cbdmas[0])
+ dmas2 = "txq1@%s;rxq1@%s" % (cbdmas[0], cbdmas[0])
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
"--vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[%s]'"
@@ -1165,7 +1187,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:1],
+ ports=cbdmas[0:1],
iova_mode="va",
)
@@ -1194,16 +1216,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("quit", "#", 60)
self.clear_virtio_user1_stats()
dmas1 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
dmas2 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -1217,7 +1239,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_pdump_to_capture_pkt()
@@ -1233,18 +1255,20 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 11: VM2VM packed ring vectorized path multi-queues payload check test with ring size is not power of 2 with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=8)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=8, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas1 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
dmas2 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
- self.cbdma_list[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
+ cbdmas[0],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -1258,7 +1282,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:1],
+ ports=cbdmas[0:1],
iova_mode="va",
)
@@ -1287,16 +1311,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.vhost_user_pmd.execute_cmd("quit", "#", 60)
self.clear_virtio_user1_stats()
dmas1 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[0],
- self.cbdma_list[1],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[0],
+ cbdmas[1],
)
dmas2 = "txq0@%s;txq1@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[2],
- self.cbdma_list[3],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[2],
+ cbdmas[3],
+ cbdmas[2],
+ cbdmas[3],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -1310,7 +1334,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:4],
+ ports=cbdmas[0:4],
iova_mode="va",
)
self.start_pdump_to_capture_pkt()
@@ -1326,9 +1350,11 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 12: VM2VM packed ring vectorized-tx path multi-queues test indirect descriptor and payload check with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
- dmas1 = "rxq0@%s" % (self.cbdma_list[0])
- dmas2 = "txq1@%s" % (self.cbdma_list[0])
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas1 = "rxq0@%s" % (cbdmas[0])
+ dmas2 = "txq1@%s" % (cbdmas[0])
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
"--vdev 'net_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[%s]'"
@@ -1339,7 +1365,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list[0:1],
+ ports=cbdmas[0:1],
iova_mode="va",
)
@@ -1369,16 +1395,16 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.virtio_user1_pmd.execute_cmd("quit", "#", 60)
self.virtio_user0_pmd.execute_cmd("quit", "#", 60)
dmas1 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
dmas2 = "txq0@%s;txq0@%s;rxq0@%s;rxq1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[%s]' "
@@ -1390,7 +1416,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
self.start_virtio_testpmd_with_vhost_net1(
@@ -1416,9 +1442,11 @@ class TestVM2VMVirtioUserCbdma(TestCase):
"""
Test Case 13: VM2VM packed ring vectorized-tx path test batch processing with cbdma enable
"""
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
- dmas1 = "txq0@%s;rxq0@%s" % (self.cbdma_list[0], self.cbdma_list[0])
- dmas2 = "txq0@%s;rxq0@%s" % (self.cbdma_list[0], self.cbdma_list[0])
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas1 = "txq0@%s;rxq0@%s" % (cbdmas[0], cbdmas[0])
+ dmas2 = "txq0@%s;rxq0@%s" % (cbdmas[0], cbdmas[0])
vhost_eal_param = (
"--vdev 'net_vhost0,iface=vhost-net0,queues=1,client=1,dmas=[%s]' "
"--vdev 'net_vhost1,iface=vhost-net1,queues=1,client=1,dmas=[%s]'"
@@ -1429,7 +1457,7 @@ class TestVM2VMVirtioUserCbdma(TestCase):
cores=self.vhost_core_list,
eal_param=vhost_eal_param,
param=vhost_param,
- ports=self.cbdma_list,
+ ports=cbdmas,
iova_mode="va",
)
@@ -1473,10 +1501,10 @@ class TestVM2VMVirtioUserCbdma(TestCase):
self.dut.kill_all()
self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
self.dut.send_expect("killall -s INT %s" % self.pdump_name, "#")
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.close_all_session()
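The per-suite pattern these hunks converge on is: bind the channels inside the test
case, hand the returned PCI list to testpmd, and return everything to the kernel driver
in tear_down_all(). A rough sketch of that pattern (a sketch only; the helper signatures
are inferred from the calls above, not quoted from tests/virtio_common.py, and the
suite/test names are hypothetical):

from framework.test_case import TestCase
from tests.virtio_common import cbdma_common as CC


class ExampleCbdmaSuite(TestCase):
    def set_up_all(self):
        self.dut_ports = self.dut.get_ports()
        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
        self.CC = CC(self)

    def test_example(self):
        # Bind two CBDMA channels on the NIC socket; the helper returns their PCI addresses.
        cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
            cbdma_num=2, driver_name="vfio-pci", socket=self.ports_socket
        )
        dmas = "txq0@%s;rxq0@%s" % (cbdmas[0], cbdmas[1])
        # ... launch vhost testpmd with ports=cbdmas and dmas=[%s] % dmas ...

    def tear_down_all(self):
        # Rebind every CBDMA channel to the kernel ioatdma driver.
        self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")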
diff --git a/tests/TestSuite_vswitch_pvp_multi_paths_performance_with_cbdma.py b/tests/TestSuite_vswitch_pvp_multi_paths_performance_with_cbdma.py
index 6ec364c1..c2d505e3 100644
--- a/tests/TestSuite_vswitch_pvp_multi_paths_performance_with_cbdma.py
+++ b/tests/TestSuite_vswitch_pvp_multi_paths_performance_with_cbdma.py
@@ -19,6 +19,8 @@ from framework.pktgen import PacketGeneratorHelper
from framework.pmd_output import PmdOutput
from framework.settings import UPDATE_EXPECTED, load_global_setting
from framework.test_case import TestCase
+from tests.virtio_common import basic_common as BC
+from tests.virtio_common import cbdma_common as CC
class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
@@ -57,6 +59,8 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.frame_size = [64, 128, 256, 512, 1024, 1518]
self.save_result_flag = True
self.json_obj = {}
+ self.CC = CC(self)
+ self.BC = BC(self)
def set_up(self):
"""
@@ -94,13 +98,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
out = self.dut.build_dpdk_apps("./examples/vhost")
self.verify("Error" not in out, "compilation vhost error")
- def check_2M_env(self):
- out = self.dut.send_expect(
- "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# "
- )
- return True if out == "2048" else False
-
- def start_vhost_app(self, allow_pci):
+ def start_vhost_app(self, allow_pci, cbdmas):
"""
launch the vhost app on vhost side
"""
@@ -112,12 +110,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
params = (
" -c {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 "
+ socket_file_param
- + " --dmas [{}] --total-num-mbufs 600000"
+ + " --dmas [txd0@{}] --total-num-mbufs 600000"
).format(
self.vhost_core_mask,
self.mem_channels,
allow_option,
- self.dmas_info,
+ cbdmas[0],
)
self.command_line = self.app_path + params
self.vhost_user.send_command(self.command_line)
@@ -137,7 +135,7 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.virtio_user0_mac, virtio_path
)
)
- if self.check_2M_env():
+ if self.BC.check_2M_hugepage_size():
eal_params += " --single-file-segments"
if force_max_simd_bitwidth:
eal_params += " --force-max-simd-bitwidth=512"
@@ -158,58 +156,6 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.virtio_user0_pmd.execute_cmd("stop")
self.virtio_user0_pmd.execute_cmd("start")
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num):
- """
- get all cbdma ports
- """
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if self.ports_socket == cur_socket:
- self.cbdma_dev_infos.append(pci_info.group(1))
- self.verify(
- len(self.cbdma_dev_infos) >= cbdma_num,
- "There no enough cbdma device to run this suite",
- )
- used_cbdma = self.cbdma_dev_infos[0:cbdma_num]
- dmas_info = ""
- for dmas in used_cbdma:
- number = used_cbdma.index(dmas)
- dmas = "txd{}@{},".format(number, dmas)
- dmas_info += dmas
- self.dmas_info = dmas_info[:-1]
- self.device_str = " ".join(used_cbdma)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.device_str),
- "# ",
- 60,
- )
-
- def bind_cbdma_device_to_kernel(self):
- if self.device_str is not None:
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s"
- % self.device_str,
- "# ",
- 60,
- )
-
def config_stream(self, frame_size):
tgen_input = []
rx_port = self.tester.get_local_port(self.dut_ports[0])
@@ -373,12 +319,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=0,mrg_rxbuf=1,in_order=1"
self.start_virtio_testpmd(virtio_path=virtio_path)
case_info = "split ring inorder mergeable"
@@ -397,12 +343,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=0,mrg_rxbuf=0,in_order=1"
self.start_virtio_testpmd(virtio_path=virtio_path)
case_info = "split ring inorder non-mergeable"
@@ -419,12 +365,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=0,mrg_rxbuf=1,in_order=0"
self.start_virtio_testpmd(virtio_path=virtio_path)
case_info = "split ring mergeable"
@@ -443,12 +389,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=0,mrg_rxbuf=0,in_order=0"
self.start_virtio_testpmd(virtio_path=virtio_path, vlan_strip=True)
case_info = "split ring non-mergeable"
@@ -465,12 +411,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=0,mrg_rxbuf=0,in_order=1,vectorized=1"
self.start_virtio_testpmd(virtio_path=virtio_path)
case_info = "split ring vectorized"
@@ -489,12 +435,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=1,mrg_rxbuf=1,in_order=1"
self.start_virtio_testpmd(virtio_path=virtio_path)
case_info = "split ring inorder mergeable"
@@ -513,12 +459,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=1,mrg_rxbuf=0,in_order=1"
self.start_virtio_testpmd(virtio_path=virtio_path)
case_info = "split ring inorder non-mergeable"
@@ -535,12 +481,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=1,mrg_rxbuf=1,in_order=0"
self.start_virtio_testpmd(virtio_path=virtio_path)
case_info = "split ring mergeable"
@@ -559,12 +505,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=1,mrg_rxbuf=0,in_order=0"
self.start_virtio_testpmd(virtio_path=virtio_path)
case_info = "split ring non-mergeable"
@@ -581,12 +527,12 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
self.test_target
]
- cbdma_num = 1
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=cbdma_num)
- allow_pci = [self.dut.ports_info[0]["pci"]]
- for item in range(cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[item])
- self.start_vhost_app(allow_pci=allow_pci)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=1, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ ports = cbdmas
+ ports.append(self.dut.ports_info[0]["pci"])
+ self.start_vhost_app(allow_pci=ports, cbdmas=cbdmas)
virtio_path = "packed_vq=1,mrg_rxbuf=0,in_order=1,vectorized=1"
self.start_virtio_testpmd(virtio_path=virtio_path)
case_info = "split ring vectorized"
@@ -608,10 +554,10 @@ class TestVswitchPvpMultiPathsPerformanceWithCbdma(TestCase):
"""
self.virtio_user0_pmd.quit()
self.vhost_user.send_expect("^C", "# ", 20)
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.close_all_session()
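For clarity, start_vhost_app() in this suite now formats the single bound CBDMA channel
directly into the --dmas option. The resulting dpdk-vhost command line is roughly as
below (a sketch; the PCI addresses, core mask, memory channels and app path are
hypothetical placeholders):

cbdmas = ["0000:00:04.0"]   # hypothetical CBDMA channel returned by the bind helper
nic_pci = "0000:18:00.0"    # hypothetical NIC port added to the allow list
allow_option = "".join(" -a {}".format(p) for p in cbdmas + [nic_pci])
params = (
    " -c {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 "
    "--socket-file ./vhost-net0 "
    "--dmas [txd0@{}] --total-num-mbufs 600000"
).format("0xf0", 4, allow_option, cbdmas[0])
print("./examples/dpdk-vhost" + params)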
diff --git a/tests/TestSuite_vswitch_sample_cbdma.py b/tests/TestSuite_vswitch_sample_cbdma.py
index d98a62ab..783275f2 100644
--- a/tests/TestSuite_vswitch_sample_cbdma.py
+++ b/tests/TestSuite_vswitch_sample_cbdma.py
@@ -19,6 +19,8 @@ from framework.pmd_output import PmdOutput
from framework.settings import HEADER_SIZE
from framework.test_case import TestCase
from framework.virt_common import VM
+from tests.virtio_common import basic_common as BC
+from tests.virtio_common import cbdma_common as CC
class TestVswitchSampleCBDMA(TestCase):
@@ -64,6 +66,8 @@ class TestVswitchSampleCBDMA(TestCase):
self.virtio_ip0 = "1.1.1.2"
self.virtio_ip1 = "1.1.1.3"
self.headers_size = HEADER_SIZE["eth"] + HEADER_SIZE["ip"]
+ self.BC = BC(self)
+ self.CC = CC(self)
def set_up(self):
"""
@@ -82,14 +86,9 @@ class TestVswitchSampleCBDMA(TestCase):
out = self.dut.build_dpdk_apps("./examples/vhost")
self.verify("Error" not in out, "compilation vhost error")
- @property
- def check_2M_env(self):
- out = self.dut.send_expect(
- "cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# "
- )
- return True if out == "2048" else False
-
- def start_vhost_app(self, cbdma_num, socket_num, dmas_info, client_mode=False):
+ def start_vhost_app(
+ self, cbdma_num, socket_num, dmas_info, cbdmas, client_mode=False
+ ):
"""
launch the vhost app on vhost side
"""
@@ -99,7 +98,7 @@ class TestVswitchSampleCBDMA(TestCase):
socket_file_param += "--socket-file ./vhost-net{} ".format(item)
allow_pci = [self.dut.ports_info[0]["pci"]]
for item in range(cbdma_num):
- allow_pci.append(self.cbdma_list[item])
+ allow_pci.append(cbdmas[item])
allow_option = ""
for item in allow_pci:
allow_option += " -a {}".format(item)
@@ -123,7 +122,7 @@ class TestVswitchSampleCBDMA(TestCase):
"""
launch the testpmd as virtio with vhost_net0
"""
- if self.check_2M_env:
+ if self.BC.check_2M_hugepage_size():
eal_param += " --single-file-segments"
self.virtio_user0_pmd.start_testpmd(
cores=self.vuser0_core_list,
@@ -138,7 +137,7 @@ class TestVswitchSampleCBDMA(TestCase):
"""
launch the testpmd as virtio with vhost_net1
"""
- if self.check_2M_env:
+ if self.BC.check_2M_hugepage_size():
eal_param += " --single-file-segments"
self.virtio_user1_pmd.start_testpmd(
cores=self.vuser1_core_list,
@@ -207,44 +206,6 @@ class TestVswitchSampleCBDMA(TestCase):
dut.bind_interfaces_linux(driver="vfio-pci")
i += 1
- def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
- """
- get and bind cbdma ports into DPDK driver
- """
- self.all_cbdma_list = []
- self.cbdma_list = []
- self.cbdma_str = ""
- out = self.dut.send_expect(
- "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
- )
- device_info = out.split("\n")
- for device in device_info:
- pci_info = re.search("\s*(0000:\S*:\d*.\d*)", device)
- if pci_info is not None:
- dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which on same socket with nic dev
- bus = int(dev_info[5:7], base=16)
- if bus >= 128:
- cur_socket = 1
- else:
- cur_socket = 0
- if allow_diff_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- else:
- if self.ports_socket == cur_socket:
- self.all_cbdma_list.append(pci_info.group(1))
- self.verify(
- len(self.all_cbdma_list) >= cbdma_num, "There no enough cbdma device"
- )
- self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
- self.cbdma_str = " ".join(self.cbdma_list)
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (self.drivername, self.cbdma_str),
- "# ",
- 60,
- )
-
def send_vlan_packet(self, dts_mac, pkt_size=64, pkt_count=1):
"""
Send a vlan packet with vlan id 1000
@@ -262,19 +223,6 @@ class TestVswitchSampleCBDMA(TestCase):
"Can't receive enough packets from tester",
)
- def bind_cbdma_device_to_kernel(self):
- if self.cbdma_str is not None:
- self.dut.send_expect("modprobe ioatdma", "# ")
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
- )
- self.dut.send_expect(
- "./usertools/dpdk-devbind.py --force --bind=ioatdma %s"
- % self.cbdma_str,
- "# ",
- 60,
- )
-
def config_stream(self, frame_size, dst_mac_list):
tgen_input = []
rx_port = self.tester.get_local_port(self.dut_ports[0])
@@ -331,13 +279,19 @@ class TestVswitchSampleCBDMA(TestCase):
self,
):
"""
- Test Case1: PVP performance check with CBDMA channel using vhost async driver
+ Test Case 1: PVP performance check with CBDMA channel using vhost async driver
"""
perf_result = []
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
- dmas_info = "txd0@%s,rxd0@%s" % (self.cbdma_list[0], self.cbdma_list[1])
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas_info = "txd0@%s,rxd0@%s" % (cbdmas[0], cbdmas[1])
self.start_vhost_app(
- cbdma_num=2, socket_num=1, dmas_info=dmas_info, client_mode=True
+ cbdma_num=2,
+ socket_num=1,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
# packed ring path
@@ -425,18 +379,24 @@ class TestVswitchSampleCBDMA(TestCase):
def test_perf_pvp_test_with_2_vms_using_vhost_async_driver(self):
"""
- Test Case2: PVP test with two VMs using vhost async driver
+ Test Case 2: PVP test with two VMs using vhost async driver
"""
perf_result = []
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1"
virtio0_param = "--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1"
@@ -455,9 +415,13 @@ class TestVswitchSampleCBDMA(TestCase):
before_relunch = self.pvp_test_with_multi_cbdma()
self.vhost_user.send_expect("^C", "# ", 20)
- dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[1])
+ dmas_info = "txd0@%s,rxd1@%s" % (cbdmas[0], cbdmas[1])
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
self.let_vswitch_know_mac(virtio_pmd=self.virtio_user0_pmd, relaunch=True)
self.let_vswitch_know_mac(virtio_pmd=self.virtio_user1_pmd, relaunch=True)
@@ -527,18 +491,24 @@ class TestVswitchSampleCBDMA(TestCase):
def test_vm2vm_virtio_user_forwarding_test_using_vhost_async_driver(self):
"""
- Test Case3: VM2VM virtio-user forwarding test using vhost async driver
+ Test Case 3: VM2VM virtio-user forwarding test using vhost async driver
"""
perf_result = []
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1"
virtio0_param = "--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1"
@@ -554,9 +524,13 @@ class TestVswitchSampleCBDMA(TestCase):
before_relunch_result = self.vm2vm_check_with_two_cbdma()
self.vhost_user.send_expect("^C", "# ", 20)
- dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[1])
+ dmas_info = "txd0@%s,rxd1@%s" % (cbdmas[0], cbdmas[1])
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
self.virtio_user0_pmd.execute_cmd("stop")
after_relunch_result = self.vm2vm_check_with_two_cbdma()
@@ -611,15 +585,21 @@ class TestVswitchSampleCBDMA(TestCase):
Test Case 4: VM2VM virtio-pmd split ring test with cbdma channels register/unregister stable check
"""
perf_result = []
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
before_rebind = self.start_vms_testpmd_and_test(
mrg_rxbuf=True, need_start_vm=True, packed=False
@@ -630,9 +610,13 @@ class TestVswitchSampleCBDMA(TestCase):
self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50)
self.vhost_user.send_expect("^C", "# ", 20)
- dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[3])
+ dmas_info = "txd0@%s,rxd1@%s" % (cbdmas[0], cbdmas[3])
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
after_rebind = self.start_vms_testpmd_and_test(
mrg_rxbuf=True, need_start_vm=False, packed=False
@@ -651,87 +635,6 @@ class TestVswitchSampleCBDMA(TestCase):
for i in perf_result:
self.verify(i[2] > 0, "%s Frame Size(Byte) is less than 0 Mpps" % i[0])
- def config_vm_env(self):
- """
- set virtio device IP and run arp protocal
- """
- vm0_intf = self.vm_dut[0].ports_info[0]["intf"]
- vm1_intf = self.vm_dut[1].ports_info[0]["intf"]
- self.vm_dut[0].send_expect(
- "ifconfig %s %s" % (vm0_intf, self.virtio_ip0), "#", 10
- )
- self.vm_dut[1].send_expect(
- "ifconfig %s %s" % (vm1_intf, self.virtio_ip1), "#", 10
- )
- self.vm_dut[0].send_expect(
- "arp -s %s %s" % (self.virtio_ip1, self.vm_dst_mac1), "#", 10
- )
- self.vm_dut[1].send_expect(
- "arp -s %s %s" % (self.virtio_ip0, self.vm_dst_mac0), "#", 10
- )
-
- def start_iperf_test(self):
- """
- run perf command between to vms
- """
- iperf_server = "iperf -f g -s -i 1"
- iperf_client = "iperf -f g -c 1.1.1.2 -i 1 -t 60"
- self.vm_dut[0].send_expect("%s > iperf_server.log &" % iperf_server, "", 10)
- self.vm_dut[1].send_expect("%s > iperf_client.log &" % iperf_client, "", 60)
- time.sleep(90)
-
- def get_iperf_result(self):
- """
- get the iperf test result
- """
- self.vm_dut[0].send_expect("pkill iperf", "# ")
- self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir)
- fp = open("./iperf_client.log")
- fmsg = fp.read()
- fp.close()
- # remove the server report info from msg
- index = fmsg.find("Server Report")
- if index != -1:
- fmsg = fmsg[:index]
- iperfdata = re.compile("\S*\s*[M|G]bits/sec").findall(fmsg)
- # the last data of iperf is the ave data from 0-30 sec
- self.verify(len(iperfdata) != 0, "The iperf data between to vms is 0")
- self.logger.info("The iperf data between vms is %s" % iperfdata[-1])
- self.verify(
- (iperfdata[-1].split()[1]) == "Gbits/sec"
- and float(iperfdata[-1].split()[0]) >= 1,
- "the throughput must be above 1Gbits/sec",
- )
- # rm the iperf log file in vm
- self.vm_dut[0].send_expect("rm iperf_server.log", "#", 10)
- self.vm_dut[1].send_expect("rm iperf_client.log", "#", 10)
- return float(iperfdata[-1].split()[0])
-
- def check_scp_file_valid_between_vms(self, file_size=1024):
- """
- scp file form VM1 to VM2, check the data is valid
- """
- # default file_size=1024K
- data = ""
- for _ in range(file_size * 1024):
- data += random.choice(self.random_string)
- self.vm_dut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ")
- # scp this file to vm1
- out = self.vm_dut[1].send_command(
- "scp root@%s:/tmp/payload /root" % self.virtio_ip0, timeout=5
- )
- if "Are you sure you want to continue connecting" in out:
- self.vm_dut[1].send_command("yes", timeout=3)
- self.vm_dut[1].send_command(self.vm[0].password, timeout=3)
- # get the file info in vm1, and check it valid
- md5_send = self.vm_dut[0].send_expect("md5sum /tmp/payload", "# ")
- md5_revd = self.vm_dut[1].send_expect("md5sum /root/payload", "# ")
- md5_send = md5_send[: md5_send.find(" ")]
- md5_revd = md5_revd[: md5_revd.find(" ")]
- self.verify(
- md5_send == md5_revd, "the received file is different with send file"
- )
-
def start_iperf_and_scp_test_in_vms(
self, mrg_rxbuf=False, need_start_vm=True, packed=False, server_mode=False
):
@@ -745,10 +648,10 @@ class TestVswitchSampleCBDMA(TestCase):
)
self.vm0_pmd = PmdOutput(self.vm_dut[0])
self.vm1_pmd = PmdOutput(self.vm_dut[1])
- self.config_vm_env()
- self.check_scp_file_valid_between_vms()
- self.start_iperf_test()
- iperfdata = self.get_iperf_result()
+ self.BC.config_2_vms_ip()
+ self.BC.check_scp_file_between_2_vms(file_size=1)
+ self.BC.run_iperf_test_between_2_vms()
+ iperfdata = self.BC.check_iperf_result_between_2_vms()
return iperfdata
def test_vm2vm_virtio_pmd_packed_ring_test_with_cbdma_channels_register_and_unregister_stable_check(
@@ -758,15 +661,21 @@ class TestVswitchSampleCBDMA(TestCase):
Test Case 5: VM2VM virtio-pmd packed ring test with cbdma channels register/unregister stable check
"""
perf_result = []
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
before_rebind = self.start_vms_testpmd_and_test(
mrg_rxbuf=True, need_start_vm=True, packed=True
@@ -800,15 +709,21 @@ class TestVswitchSampleCBDMA(TestCase):
Test Case 6: VM2VM virtio-net split ring test with 4 cbdma channels and iperf stable check
"""
perf_result = []
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
before_relaunch = self.start_iperf_and_scp_test_in_vms(
mrg_rxbuf=False, need_start_vm=True, packed=False, server_mode=True
@@ -817,7 +732,11 @@ class TestVswitchSampleCBDMA(TestCase):
self.vhost_user.send_expect("^C", "# ", 20)
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
for _ in range(5):
@@ -825,9 +744,13 @@ class TestVswitchSampleCBDMA(TestCase):
perf_result.append(["split ring", "After rerun test", rerun_result])
self.vhost_user.send_expect("^C", "# ", 20)
- dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[1])
+ dmas_info = "txd0@%s,rxd1@%s" % (cbdmas[0], cbdmas[1])
self.start_vhost_app(
- cbdma_num=2, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=2,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
after_relaunch = self.start_iperf_and_scp_test_in_vms(
@@ -847,15 +770,21 @@ class TestVswitchSampleCBDMA(TestCase):
Test Case 7: VM2VM virtio-net packed ring test with 4 cbdma channels and iperf stable check
"""
perf_result = []
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
+ )
dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
- self.cbdma_list[0],
- self.cbdma_list[1],
- self.cbdma_list[2],
- self.cbdma_list[3],
+ cbdmas[0],
+ cbdmas[1],
+ cbdmas[2],
+ cbdmas[3],
)
self.start_vhost_app(
- cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+ cbdma_num=4,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=True,
)
before_relaunch = self.start_iperf_and_scp_test_in_vms(
mrg_rxbuf=False, need_start_vm=True, packed=True, server_mode=True
@@ -878,10 +807,16 @@ class TestVswitchSampleCBDMA(TestCase):
Test Case 8: VM2VM virtio-net packed ring test with 2 cbdma channels and iperf stable check
"""
perf_result = []
- self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
- dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[1])
+ cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
+ cbdma_num=2, driver_name="vfio-pci", socket=self.ports_socket
+ )
+ dmas_info = "txd0@%s,rxd1@%s" % (cbdmas[0], cbdmas[1])
self.start_vhost_app(
- cbdma_num=2, socket_num=2, dmas_info=dmas_info, client_mode=False
+ cbdma_num=2,
+ socket_num=2,
+ dmas_info=dmas_info,
+ cbdmas=cbdmas,
+ client_mode=False,
)
before_relaunch = self.start_iperf_and_scp_test_in_vms(
mrg_rxbuf=False, need_start_vm=True, packed=True, server_mode=False
@@ -912,10 +847,10 @@ class TestVswitchSampleCBDMA(TestCase):
self.vm[i].stop()
self.vhost_user.send_expect("^C", "# ", 20)
self.dut.kill_all()
- self.bind_cbdma_device_to_kernel()
def tear_down_all(self):
"""
Run after each test suite.
"""
+ self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")
self.close_all_session()
diff --git a/tests/virtio_common.py b/tests/virtio_common.py
index 9754f49e..cf1bbe43 100644
--- a/tests/virtio_common.py
+++ b/tests/virtio_common.py
@@ -24,7 +24,11 @@ class basic_common(object):
self.vm0_mac = "52:54:00:00:00:01"
self.vm1_mac = "52:54:00:00:00:02"
- def check_2M_env(self):
+ def check_2M_hugepage_size(self):
+ """
+ check whether the Hugepage size on the DUT is 2M
+ :return: True or False
+ """
out = self.test_case.dut.send_expect(
"cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# "
)
@@ -32,7 +36,7 @@ class basic_common(object):
def config_2_vms_ip(self):
"""
- config VM interface IP and run arp protocal
+ config VM interface IP address and send ARP request
"""
vm0_intf = self.test_case.vm_dut[0].ports_info[0]["intf"]
vm1_intf = self.test_case.vm_dut[1].ports_info[0]["intf"]
@@ -148,18 +152,19 @@ class basic_common(object):
# rm the iperf log file in vm
self.test_case.vm_dut[0].send_expect("rm iperf_server.log", "#")
self.test_case.vm_dut[1].send_expect("rm iperf_client.log", "#")
+ return iperfdata[-1]
class cbdma_common(object):
def __init__(self, test_case):
self.test_case = test_case
- def get_all_cbdma_pci(self):
+ def get_all_cbdma_pcis(self):
"""
- Get all the CBDMA device PCI of DUT.
- :return: [0000:00:04.0, 0000:00:04.1, 0000:00:04.2, 0000:00:04.3]
+ get all the CBDMA device PCIs on DUT
+ :return: cbdma_pcis, like [0000:00:04.0, 0000:00:04.1, 0000:00:04.2, 0000:00:04.3, 0000:00:04.4]
"""
- cbdma_pci = []
+ cbdma_pcis = []
out = self.test_case.dut.send_expect(
"./usertools/dpdk-devbind.py --status-dev dma", "#"
)
@@ -167,20 +172,24 @@ class cbdma_common(object):
for item in info:
pci = re.search("\s*(0000:\S*:\d*.\d*)", item)
if pci is not None:
- cbdma_pci.append(pci.group(1))
- return cbdma_pci
+ cbdma_pcis.append(pci.group(1))
+ cbdma_pcis.sort()
+ return cbdma_pcis
- def bind_cbdma_to_dpdk(self, cbdma_number, driver_name="vfio-pci", socket=-1):
+ def bind_cbdma_to_dpdk_driver(
+ self, cbdma_num, driver_name="vfio-pci", cbdma_idxs="all", socket=-1
+ ):
"""
- Bind CBDMA device to driver
- :param cbdma_number: number of CBDMA device to be bind.
+ bind CBDMA devices to the DPDK driver
+ :param cbdma_num: number of CBDMA devices to bind.
:param driver_name: driver name, like `vfio-pci`.
+ :param cbdma_idxs: the index list of CBDMA devices, like [2,3]
:param socket: socket id: like 0 or 1, if socket=-1, use all the CBDMA deveice no matter on which socket.
- :return: bind_cbdma_list, like [0000:00:04.0, 0000:00:04.1]
+ :return: bind_cbdmas, like [0000:00:04.0, 0000:00:04.1]
"""
- cbdma_list = []
- cbdma_pci = self.get_all_cbdma_pci()
- for pci in cbdma_pci:
+ cbdmas = []
+ cbdma_pcis = self.get_all_cbdma_pcis()
+ for pci in cbdma_pcis:
addr_array = pci.split(":")
domain_id, bus_id, devfun_id = addr_array[0], addr_array[1], addr_array[2]
cur_socket = self.test_case.dut.send_expect(
@@ -191,25 +200,38 @@ class cbdma_common(object):
)
if socket != -1:
if int(cur_socket) == socket:
- cbdma_list.append(pci)
+ cbdmas.append(pci)
else:
- cbdma_list.append(pci)
- bind_cbdma_list = cbdma_list[0:cbdma_number]
- bind_cbdma_string = " ".join(bind_cbdma_list)
+ cbdmas.append(pci)
+ if cbdma_idxs == "all":
+ bind_cbdmas = cbdmas[0:cbdma_num]
+ else:
+ tmp_cbdmas = []
+ for i in cbdma_idxs:
+ tmp_cbdmas.append(cbdmas[i])
+ bind_cbdmas = tmp_cbdmas[0:cbdma_num]
+ bind_cbdma_str = " ".join(bind_cbdmas)
self.test_case.dut.send_expect(
"./usertools/dpdk-devbind.py --force --bind=%s %s"
- % (driver_name, bind_cbdma_string),
+ % (driver_name, bind_cbdma_str),
"# ",
60,
)
- return bind_cbdma_list
+ return bind_cbdmas
- def bind_all_cbdma_to_kernel(self):
+ def bind_cbdma_to_kernel_driver(self, cbdma_idxs="all"):
"""
- Check the CBDMA device is bind to kernel driver or not, if not bind to kernel driver, then bind to kernel driver.
+ check whether the CBDMA device is bound to a kernel driver,
+ if not, bind it to the kernel driver.
"""
- cbdma_pci = self.get_all_cbdma_pci()
- for pci in cbdma_pci:
+ cbdma_pcis = self.get_all_cbdma_pcis()
+ pcis = []
+ if cbdma_idxs == "all":
+ pcis = cbdma_pcis
+ else:
+ for cbdma_idx in cbdma_idxs:
+ pcis.append(cbdma_pcis[cbdma_idx])
+ for pci in pcis:
addr_array = pci.split(":")
domain_id, bus_id, devfun_id = addr_array[0], addr_array[1], addr_array[2]
out = self.test_case.dut.send_expect(
@@ -239,8 +261,8 @@ class dsa_common(object):
def get_all_dsa_pcis(self):
"""
- Get all the DSA device PCI of DUT.
- :return: [0000:6a:01.0, 0000:6f:01.0, 0000:74:01.0, 0000:79:01.0]
+ get all the DSA device PCIs on DUT
+ :return: dsa_pcis, like [0000:6a:01.0, 0000:6f:01.0, 0000:74:01.0, 0000:79:01.0]
"""
dsa_pcis = []
out = self.test_case.dut.send_expect(
@@ -256,7 +278,8 @@ class dsa_common(object):
def bind_dsa_to_kernel_driver(self, dsa_idx):
"""
- Get the DSA device current driver
+ check whether the DSA device is bound to a kernel driver,
+ if not, bind it to the kernel driver.
"""
dsa_pcis = self.get_all_dsa_pcis()
pci = dsa_pcis[dsa_idx]
@@ -282,7 +305,7 @@ class dsa_common(object):
def get_all_dsa_idxs(self):
"""
- Get all DSA device work queue index.
+ get all DSA device work queue indexes
Example: `wq0.0 wq0.1 wq1.0 wq1.1`, return [0, 1]
"""
dsa_idxs = []
@@ -296,7 +319,8 @@ class dsa_common(object):
def check_wq_exist(self, dsa_idx):
"""
- Check DSA device has work queue or not, if has work queue, return True, or return False
+ check whether the DSA device has a work queue,
+ return True if it does, otherwise False
"""
if dsa_idx in self.get_all_dsa_idxs():
return True
@@ -305,7 +329,7 @@ class dsa_common(object):
def reset_wq(self, dsa_idx):
"""
- Reset DSA device work queue which have created work queue.
+ reset the work queues that have been created on the DSA device
"""
if self.check_wq_exist(dsa_idx):
self.test_case.dut.send_expect(
@@ -314,9 +338,10 @@ class dsa_common(object):
def create_wq(self, wq_num, dsa_idxs):
"""
- Create work queue by work_queue_number and dsa_idx.
+ create work queues according to wq_num and dsa_idxs
:param wq_num: number of work queue to be create.
:param dsa_idxs: index of DSA device which to create work queue.
+ :return: wqs, like [wq0.0, wq0.1, wq1.0, wq1.1]
Example: wq_num=4, dsa_idx=[0, 1], will create 4 work queue:
root@dpdk:~# ls /dev/dsa/
wq0.0 wq0.1 wq1.0 wq1.1
@@ -343,15 +368,14 @@ class dsa_common(object):
self, dsa_num, driver_name="vfio-pci", dsa_idxs="all", socket=-1
):
"""
- Bind DSA device to driver
+ bind DSA device to driver
:param dsa_num: number of DSA device to be bind.
:param driver_name: driver name, like `vfio-pci`.
:param dsa_idxs: the index list of DSA device, like [2,3]
:param socket: socket id: like 0 or 1, if socket=-1, use all the DSA deveice no matter on which socket.
- :return: bind_dsa_list, like [0000:6a:01.0, 0000:6f:01.0]
+ :return: bind_dsas, like [0000:6a:01.0, 0000:6f:01.0]
"""
dsas = []
- bind_dsas = []
dsa_pcis = self.get_all_dsa_pcis()
for pci in dsa_pcis:
addr_array = pci.split(":")
@@ -373,7 +397,7 @@ class dsa_common(object):
tmp_dsas = []
for i in dsa_idxs:
tmp_dsas.append(dsas[i])
- bind_dsas = tmp_dsas[0:dsa_num]
+ bind_dsas = tmp_dsas[0:dsa_num]
bind_dsas_str = " ".join(bind_dsas)
self.test_case.dut.send_expect(
"./usertools/dpdk-devbind.py --force --bind=%s %s"
--
2.17.1
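For quick reference, below is a minimal sketch of how a cbdma suite drives the refactored helpers after this patch. The helper names and call signatures are taken from the diff above; the suite class, port setup, and test body are illustrative only and do not correspond to any single suite in the series.

    from framework.test_case import TestCase
    from tests.virtio_common import basic_common as BC
    from tests.virtio_common import cbdma_common as CC


    class TestExampleCbdmaSuite(TestCase):
        def set_up_all(self):
            self.dut_ports = self.dut.get_ports()
            self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
            # the helpers take the running TestCase so they can reuse its dut/vm sessions
            self.BC = BC(self)
            self.CC = CC(self)

        def test_example(self):
            # bind 4 CBDMA devices on the NIC's socket to vfio-pci; the returned PCI
            # addresses are used directly in the vhost dmas/allow-list parameters
            cbdmas = self.CC.bind_cbdma_to_dpdk_driver(
                cbdma_num=4, driver_name="vfio-pci", socket=self.ports_socket
            )
            dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
                cbdmas[0], cbdmas[1], cbdmas[2], cbdmas[3],
            )
            # 2M hugepages need --single-file-segments on the virtio-user side
            eal_param = ""
            if self.BC.check_2M_hugepage_size():
                eal_param += " --single-file-segments"
            # ... launch vhost/virtio and the VMs here, then reuse the shared checks:
            # self.BC.config_2_vms_ip()
            # self.BC.check_scp_file_between_2_vms(file_size=1)
            # self.BC.run_iperf_test_between_2_vms()
            # iperfdata = self.BC.check_iperf_result_between_2_vms()

        def tear_down_all(self):
            # return all CBDMA devices to the kernel ioatdma driver
            self.CC.bind_cbdma_to_kernel_driver(cbdma_idxs="all")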
* [dts][PATCH V1] tests/*_cbdma: Optimize calling virtio_common API method
2023-04-18 8:19 [dts][PATCH V1] tests/*_cbdma: Optimize calling virtio_common API method Dukai Yuan
@ 2023-04-26 2:51 ` lijuan.tu
0 siblings, 0 replies; 2+ messages in thread
From: lijuan.tu @ 2023-04-26 2:51 UTC (permalink / raw)
To: dts, Dukai Yuan; +Cc: Dukai Yuan
On Tue, 18 Apr 2023 16:19:01 +0800, Dukai Yuan <dukaix.yuan@intel.com> wrote:
> The tests/virtio_common.py have been optimized, so modify all the cbdma related testsuite
> which calls the virtio_common API in cbdma_common() and basic_common () class.
>
> Signed-off-by: Dukai Yuan <dukaix.yuan@intel.com>
Applied, thanks