* [dts] [PATCH V1] tests/TestSuite_vhost_virtio_pmd_interrupt: add 2 cbdma scripts
@ 2020-09-22 5:22 JiangYuX
2020-09-22 6:04 ` Jiang, YuX
2020-09-28 8:49 ` Tu, Lijuan
0 siblings, 2 replies; 3+ messages in thread
From: JiangYuX @ 2020-09-22 5:22 UTC (permalink / raw)
To: dts; +Cc: JiangYu
From: JiangYu <yux.jiang@intel.com>
1, add 2 cbdma scripts; 2, kill apps by app_name to adapt to the meson build
Signed-off-by: JiangYu <yux.jiang@intel.com>
---
tests/TestSuite_vhost_virtio_pmd_interrupt.py | 75 +++++++++++++++++++++++++--
1 file changed, 70 insertions(+), 5 deletions(-)
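Note for reviewers: the new get_cbdma_ports_info_and_bind_to_dpdk() helper pairs each vhost TX queue with one CBDMA device and hands the result to testpmd through the vdev's dmas list. A minimal sketch of the string it ends up building, with made-up PCI addresses for illustration (the 4-queue case of test case 6):

    # illustrative only -- the BDF addresses below are hypothetical, not from the patch
    used_cbdma = ['0000:00:04.0', '0000:00:04.1', '0000:00:04.2', '0000:00:04.3']
    dmas_info = ';'.join('txq{}@{}'.format(i, dev) for i, dev in enumerate(used_cbdma))
    # dmas_info == 'txq0@0000:00:04.0;txq1@0000:00:04.1;txq2@0000:00:04.2;txq3@0000:00:04.3'
    vdev = "'net_vhost0,iface=%s/vhost-net,queues=4,dmas=[%s]'" % ('/root/dpdk', dmas_info)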
diff --git a/tests/TestSuite_vhost_virtio_pmd_interrupt.py b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
index f6b1d95..af5e5eb 100644
--- a/tests/TestSuite_vhost_virtio_pmd_interrupt.py
+++ b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
@@ -36,6 +36,7 @@ vhost virtio pmd interrupt need test with l3fwd-power sample
import utils
import time
+import re
from virt_common import VM
from test_case import TestCase
from packet import Packet
@@ -72,6 +73,9 @@ class TestVhostVirtioPmdInterrupt(TestCase):
self.base_dir = self.dut.base_dir.replace('~', '/root')
self.app_l3fwd_power_path = self.dut.apps_name['l3fwd-power']
self.app_testpmd_path = self.dut.apps_name['test-pmd']
+ self.testpmd_name = self.app_testpmd_path.split("/")[-1]
+ self.l3fwdpower_name = self.app_l3fwd_power_path.split("/")[-1]
+ self.device_str = None
def set_up(self):
"""
@@ -79,7 +83,7 @@ class TestVhostVirtioPmdInterrupt(TestCase):
"""
# Clean the execution ENV
self.verify_info = []
- self.dut.send_expect("killall -s INT testpmd", "#")
+ self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
self.vhost_user = self.dut.new_session(suite="vhost-user")
@@ -112,15 +116,21 @@ class TestVhostVirtioPmdInterrupt(TestCase):
self.vm_dut.send_expect("modprobe vfio-pci", "#")
self.vm_dut.ports_info[0]['port'].bind_driver('vfio-pci')
- def start_testpmd_on_vhost(self):
+ def start_testpmd_on_vhost(self, dmas=None):
"""
start testpmd on vhost side
"""
# get the core list depend on current nb_cores number
self.get_core_list()
testcmd = self.app_testpmd_path + " "
- vdev = ['net_vhost0,iface=%s/vhost-net,queues=%d' % (self.base_dir, self.queues)]
- eal_params = self.dut.create_eal_parameters(cores=self.core_list, ports=[self.pci_info], vdevs=vdev)
+ if dmas:
+ device_str = self.device_str.split(" ")
+ device_str.append(self.pci_info)
+ vdev = ["'net_vhost0,iface=%s/vhost-net,queues=%d,dmas=[%s]'" % (self.base_dir, self.queues, dmas)]
+ eal_params = self.dut.create_eal_parameters(cores=self.core_list, ports=device_str, vdevs=vdev)
+ else:
+ vdev = ['net_vhost0,iface=%s/vhost-net,queues=%d' % (self.base_dir, self.queues)]
+ eal_params = self.dut.create_eal_parameters(cores=self.core_list, ports=[self.pci_info], vdevs=vdev)
para = " -- -i --nb-cores=%d --rxq=%d --txq=%d --rss-ip" % (self.nb_cores, self.queues, self.queues)
command_line_client = testcmd + eal_params + para
self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
@@ -266,13 +276,40 @@ class TestVhostVirtioPmdInterrupt(TestCase):
self.check_related_cores_status_in_l3fwd(out, "waked up", fix_ip=True)
self.check_related_cores_status_in_l3fwd(out, "sleeps", fix_ip=True)
+ def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num):
+ """
+ get all cbdma ports
+ """
+ self.dut.setup_modules(self.target, "igb_uio", "None")
+ out = self.dut.send_expect('./usertools/dpdk-devbind.py --status-dev misc', '# ', 30)
+ cbdma_dev_infos = re.findall(r'\s*(0000:\d+:\d+\.\d+)', out)
+ self.verify(len(cbdma_dev_infos) >= cbdma_num, 'There are not enough cbdma devices to run this suite')
+
+ used_cbdma = cbdma_dev_infos[0:cbdma_num]
+ dmas_info = ''
+ for dmas in used_cbdma:
+ number = used_cbdma.index(dmas)
+ dmas = 'txq{}@{};'.format(number, dmas)
+ dmas_info += dmas
+ self.dmas_info = dmas_info[:-1]
+
+ self.device_str = ' '.join(used_cbdma)
+ self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=%s %s %s' %
+ ("igb_uio", self.device_str, self.pci_info), '# ', 60)
+
+ def bind_cbdma_device_to_kernel(self):
+ if self.device_str is not None:
+ self.dut.send_expect('modprobe ioatdma', '# ')
+ self.dut.send_expect('./usertools/dpdk-devbind.py -u %s' % self.device_str, '# ', 30)
+ self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=ioatdma %s' % self.device_str, '# ', 60)
+
def stop_all_apps(self):
"""
close all vms
"""
if self.vm_dut is not None:
vm_dut2 = self.vm_dut.create_session(name="vm_dut2")
- vm_dut2.send_expect("killall l3fwd-power", "# ", 10)
+ vm_dut2.send_expect("killall %s" % self.l3fwdpower_name, "# ", 10)
# self.vm_dut.send_expect("killall l3fwd-power", "# ", 60, alt_session=True)
self.vm_dut.send_expect("cp /tmp/main.c ./examples/l3fwd-power/", "#", 15)
out = self.vm_dut.build_dpdk_apps('examples/l3fwd-power')
@@ -330,12 +367,40 @@ class TestVhostVirtioPmdInterrupt(TestCase):
self.launch_l3fwd_power_in_vm()
self.send_and_verify()
+ def test_perf_virtio_interrupt_with_16_queues_and_cbdma_enabled(self):
+ """
+ Test Case 5: Basic virtio interrupt test with 16 queues and cbdma enabled
+ """
+ used_cbdma_num = 16
+ self.queues = 16
+ self.nb_cores = 16
+ self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+ self.start_testpmd_on_vhost(self.dmas_info)
+ self.start_vms(mode=0)
+ self.prepare_vm_env()
+ self.launch_l3fwd_power_in_vm()
+ self.send_and_verify()
+
+ def test_perf_virtio10_interrupt_with_4_queues_and_cbdma_enabled(self):
+ """
+ Test Case 6: Basic virtio-1.0 interrupt test with 4 queues and cbdma enabled
+ """
+ used_cbdma_num = 4
+ self.queues = 4
+ self.nb_cores = 4
+ self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+ self.start_testpmd_on_vhost(self.dmas_info)
+ self.start_vms(mode=1)
+ self.prepare_vm_env()
+ self.launch_l3fwd_power_in_vm()
+ self.send_and_verify()
def tear_down(self):
"""
Run after each test case.
"""
self.stop_all_apps()
+ self.bind_cbdma_device_to_kernel()
self.dut.kill_all()
self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
--
2.7.4
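A note on the second change (killing by app_name): a meson build typically names the compiled binaries dpdk-testpmd and dpdk-l3fwd-power rather than testpmd and l3fwd-power, so set_up() and stop_all_apps() now kill the basename taken from apps_name instead of a hard-coded name. A minimal sketch of the effect, assuming the usual meson names (the build path below is hypothetical):

    # illustrative only -- the build path and binary name are assumptions
    app_testpmd_path = 'x86_64-native-linuxapp-gcc/app/dpdk-testpmd'
    testpmd_name = app_testpmd_path.split('/')[-1]   # 'dpdk-testpmd'
    # set_up() then runs: killall -s INT dpdk-testpmd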
* Re: [dts] [PATCH V1] tests/TestSuite_vhost_virtio_pmd_interrupt: add 2 cbdma scripts
2020-09-22 5:22 [dts] [PATCH V1] tests/TestSuite_vhost_virtio_pmd_interrupt: add 2 cbdma scripts JiangYuX
@ 2020-09-22 6:04 ` Jiang, YuX
2020-09-28 8:49 ` Tu, Lijuan
1 sibling, 0 replies; 3+ messages in thread
From: Jiang, YuX @ 2020-09-22 6:04 UTC (permalink / raw)
To: dts; +Cc: Jiang, YuX
Tested-by: Jiang, YuX <yux.jiang@intel.com>
Best Regards
Jiang yu
> -----Original Message-----
> From: Jiang, YuX
> Sent: Tuesday, September 22, 2020 1:23 PM
> To: dts@dpdk.org
> Cc: Jiang, YuX <yux.jiang@intel.com>
> Subject: [dts] [PATCH V1] tests/TestSuite_vhost_virtio_pmd_interrupt: add
> 2 cbdma scripts
>
> From: JiangYu <yux.jiang@intel.com>
>
> 1, add 2 cbdma scripts; 2, kill apps by app_name to adapt to the meson build
>
> Signed-off-by: JiangYu <yux.jiang@intel.com>
[-- Attachment #2: TestVhostVirtioPmdInterrupt.log --]
[-- Type: application/octet-stream, Size: 236515 bytes --]
* Re: [dts] [PATCH V1] tests/TestSuite_vhost_virtio_pmd_interrupt: add 2 cbdma scripts
2020-09-22 5:22 [dts] [PATCH V1] tests/TestSuite_vhost_virtio_pmd_interrupt: add 2 cbdma scripts JiangYuX
2020-09-22 6:04 ` Jiang, YuX
@ 2020-09-28 8:49 ` Tu, Lijuan
1 sibling, 0 replies; 3+ messages in thread
From: Tu, Lijuan @ 2020-09-28 8:49 UTC (permalink / raw)
To: Jiang, YuX, dts; +Cc: Jiang, YuX
> 1, add 2 cbdma scripts; 2, kill apps by app_name to adapt to the meson build
>
> Signed-off-by: JiangYu <yux.jiang@intel.com>
Applied