From: JiangYuX
To: dts@dpdk.org
Cc: JiangYu
Date: Tue, 22 Sep 2020 13:22:57 +0800
Message-Id: <20200922052257.3943-1-yux.jiang@intel.com>
X-Mailer: git-send-email 2.17.1
Subject: [dts] [PATCH V1] tests/TestSuite_vhost_virtio_pmd_interrupt: add 2 cbdma scripts

From: JiangYu

1. Add two cbdma test cases.
2. Adapt the killed app name for the meson build.

Signed-off-by: JiangYu
---
 tests/TestSuite_vhost_virtio_pmd_interrupt.py | 75 +++++++++++++++++++++++++--
 1 file changed, 70 insertions(+), 5 deletions(-)

diff --git a/tests/TestSuite_vhost_virtio_pmd_interrupt.py b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
index f6b1d95..af5e5eb 100644
--- a/tests/TestSuite_vhost_virtio_pmd_interrupt.py
+++ b/tests/TestSuite_vhost_virtio_pmd_interrupt.py
@@ -36,6 +36,7 @@ vhost virtio pmd interrupt need test with l3fwd-power sample

 import utils
 import time
+import re
 from virt_common import VM
 from test_case import TestCase
 from packet import Packet
@@ -72,6 +73,9 @@ class TestVhostVirtioPmdInterrupt(TestCase):
         self.base_dir = self.dut.base_dir.replace('~', '/root')
         self.app_l3fwd_power_path = self.dut.apps_name['l3fwd-power']
         self.app_testpmd_path = self.dut.apps_name['test-pmd']
+        self.testpmd_name = self.app_testpmd_path.split("/")[-1]
+        self.l3fwdpower_name = self.app_l3fwd_power_path.split("/")[-1]
+        self.device_str = None

     def set_up(self):
         """
@@ -79,7 +83,7 @@
         """
         # Clean the execution ENV
         self.verify_info = []
-        self.dut.send_expect("killall -s INT testpmd", "#")
+        self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
         self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
         self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
         self.vhost_user = self.dut.new_session(suite="vhost-user")
@@ -112,15 +116,21 @@ class TestVhostVirtioPmdInterrupt(TestCase):
         self.vm_dut.send_expect("modprobe vfio-pci", "#")
         self.vm_dut.ports_info[0]['port'].bind_driver('vfio-pci')

-    def start_testpmd_on_vhost(self):
+    def start_testpmd_on_vhost(self, dmas=None):
         """
         start testpmd on vhost side
         """
         # get the core list depend on current nb_cores number
         self.get_core_list()
         testcmd = self.app_testpmd_path + " "
-        vdev = ['net_vhost0,iface=%s/vhost-net,queues=%d' % (self.base_dir, self.queues)]
-        eal_params = self.dut.create_eal_parameters(cores=self.core_list, ports=[self.pci_info], vdevs=vdev)
+        if dmas:
+            device_str = self.device_str.split(" ")
+            device_str.append(self.pci_info)
+            vdev = ["'net_vhost0,iface=%s/vhost-net,queues=%d,dmas=[%s]'" % (self.base_dir, self.queues, dmas)]
+            eal_params = self.dut.create_eal_parameters(cores=self.core_list, ports=device_str, vdevs=vdev)
+        else:
+            vdev = ['net_vhost0,iface=%s/vhost-net,queues=%d' % (self.base_dir, self.queues)]
+            eal_params = self.dut.create_eal_parameters(cores=self.core_list, ports=[self.pci_info], vdevs=vdev)
         para = " -- -i --nb-cores=%d --rxq=%d --txq=%d --rss-ip" % (self.nb_cores, self.queues, self.queues)
         command_line_client = testcmd + eal_params + para
         self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
@@ -266,13 +276,40 @@ class TestVhostVirtioPmdInterrupt(TestCase):
         self.check_related_cores_status_in_l3fwd(out, "waked up", fix_ip=True)
         self.check_related_cores_status_in_l3fwd(out, "sleeps", fix_ip=True)

+    def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num):
+        """
+        get all cbdma ports
+        """
+        self.dut.setup_modules(self.target, "igb_uio", "None")
+        out = self.dut.send_expect('./usertools/dpdk-devbind.py --status-dev misc', '# ', 30)
+        cbdma_dev_infos = re.findall('\s*(0000:\d+:\d+.\d+)', out)
+        self.verify(len(cbdma_dev_infos) >= cbdma_num, 'There are not enough cbdma devices to run this suite')
+
+        used_cbdma = cbdma_dev_infos[0:cbdma_num]
+        dmas_info = ''
+        for dmas in used_cbdma:
+            number = used_cbdma.index(dmas)
+            dmas = 'txq{}@{};'.format(number, dmas)
+            dmas_info += dmas
+        self.dmas_info = dmas_info[:-1]
+
+        self.device_str = ' '.join(used_cbdma)
+        self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=%s %s %s' %
+                             ("igb_uio", self.device_str, self.pci_info), '# ', 60)
+
+    def bind_cbdma_device_to_kernel(self):
+        if self.device_str is not None:
+            self.dut.send_expect('modprobe ioatdma', '# ')
+            self.dut.send_expect('./usertools/dpdk-devbind.py -u %s' % self.device_str, '# ', 30)
+            self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=ioatdma %s' % self.device_str, '# ', 60)
+
     def stop_all_apps(self):
         """
         close all vms
         """
         if self.vm_dut is not None:
             vm_dut2 = self.vm_dut.create_session(name="vm_dut2")
-            vm_dut2.send_expect("killall l3fwd-power", "# ", 10)
+            vm_dut2.send_expect("killall %s" % self.l3fwdpower_name, "# ", 10)
             # self.vm_dut.send_expect("killall l3fwd-power", "# ", 60, alt_session=True)
             self.vm_dut.send_expect("cp /tmp/main.c ./examples/l3fwd-power/", "#", 15)
             out = self.vm_dut.build_dpdk_apps('examples/l3fwd-power')
@@ -330,12 +367,40 @@ class TestVhostVirtioPmdInterrupt(TestCase):
         self.launch_l3fwd_power_in_vm()
         self.send_and_verify()

+    def test_perf_virtio_interrupt_with_16_queues_and_cbdma_enabled(self):
+        """
+        Test Case 5: Basic virtio interrupt test with 16 queues and cbdma enabled
+        """
+        used_cbdma_num = 16
+        self.queues = 16
+        self.nb_cores = 16
+        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+        self.start_testpmd_on_vhost(self.dmas_info)
+        self.start_vms(mode=0)
+        self.prepare_vm_env()
+        self.launch_l3fwd_power_in_vm()
+        self.send_and_verify()
+
+    def test_perf_virtio10_interrupt_with_4_queues_and_cbdma_enabled(self):
+        """
+        Test Case 6: Basic virtio-1.0 interrupt test with 4 queues and cbdma enabled
+        """
+        used_cbdma_num = 4
+        self.queues = 4
+        self.nb_cores = 4
+        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+        self.start_testpmd_on_vhost(self.dmas_info)
+        self.start_vms(mode=1)
+        self.prepare_vm_env()
+        self.launch_l3fwd_power_in_vm()
+        self.send_and_verify()

     def tear_down(self):
         """
         Run after each test case.
         """
         self.stop_all_apps()
+        self.bind_cbdma_device_to_kernel()
         self.dut.kill_all()
         self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
--
2.7.4
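
For context on the app-name change: the meson build names the binaries
"dpdk-testpmd" and "dpdk-l3fwd-power" rather than the legacy "testpmd" and
"l3fwd-power", so a hard-coded killall target no longer matches and the suite
now derives the process name from dut.apps_name. A minimal sketch of that
behaviour, with hypothetical paths standing in for self.dut.apps_name:

    # Illustrative sketch only; the paths below are assumptions, in the
    # suite they come from self.dut.apps_name.
    apps_name = {
        'test-pmd': 'x86_64-native-linuxapp-gcc/app/dpdk-testpmd',
        'l3fwd-power': 'examples/dpdk-l3fwd-power',
    }

    # Take the basename of the built binary as the process name to kill.
    testpmd_name = apps_name['test-pmd'].split("/")[-1]        # 'dpdk-testpmd'
    l3fwdpower_name = apps_name['l3fwd-power'].split("/")[-1]  # 'dpdk-l3fwd-power'

    # Cleanup then kills by that basename instead of a hard-coded name.
    kill_cmd = "killall -s INT %s" % testpmd_name              # killall -s INT dpdk-testpmd
    print(kill_cmd)

The dmas string passed to start_testpmd_on_vhost() is the one built by
get_cbdma_ports_info_and_bind_to_dpdk(), one txq entry per bound CBDMA device,
e.g. "txq0@0000:00:04.0;txq1@0000:00:04.1" (addresses hypothetical).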