From: Ling Wei <weix.ling@intel.com>
To: dts@dpdk.org
Cc: Ling Wei <weix.ling@intel.com>
Subject: [dts] [PATCH V1] tests/vhost_event_idx_interrupt: add 2 packed ring cbdma testcases to sync with testplan
Date: Fri,  2 Apr 2021 15:15:09 +0800	[thread overview]
Message-ID: <20210402071509.66456-1-weix.ling@intel.com> (raw)

Add two packed ring CBDMA test cases (9 and 10) to sync with the test plan.

Signed-off-by: Ling Wei <weix.ling@intel.com>
---
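Reviewer note (not part of the commit message): the two new cases reuse the
CBDMA helpers that this patch moves above the test functions. The standalone
sketch below only illustrates the bind/unbind sequence those helpers drive on
the DUT; it assumes a DPDK tree with usertools/dpdk-devbind.py in the working
directory and that the igb_uio and ioatdma drivers are available. It is an
illustration, not DTS framework code.

#!/usr/bin/env python3
# Sketch of the CBDMA bind/unbind flow exercised by
# get_cbdma_ports_info_and_bind_to_dpdk() and bind_cbdma_device_to_kernel().
import re
import subprocess

DEVBIND = './usertools/dpdk-devbind.py'   # assumed path inside the DPDK tree

def list_cbdma_devices():
    # '--status-dev misc' lists the CBDMA (ioat) DMA engines.
    out = subprocess.run([DEVBIND, '--status-dev', 'misc'],
                         capture_output=True, text=True, check=True).stdout
    return re.findall(r'(0000:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.\d)', out)

def bind_to_dpdk(devices):
    # Bind the CBDMA devices to igb_uio before launching l3fwd-power.
    subprocess.run([DEVBIND, '--force', '--bind=igb_uio', *devices], check=True)

def bind_back_to_kernel(devices):
    # Restore the kernel ioatdma driver, as done in tear_down.
    subprocess.run(['modprobe', 'ioatdma'], check=True)
    subprocess.run([DEVBIND, '-u', *devices], check=True)
    subprocess.run([DEVBIND, '--force', '--bind=ioatdma', *devices], check=True)

if __name__ == '__main__':
    cbdma = list_cbdma_devices()
    print('CBDMA devices found:', cbdma)

The suite runs the same sequence through dut.send_expect(), trimming the
device list to self.queues * self.vm_num entries before binding.
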
 tests/TestSuite_vhost_event_idx_interrupt.py | 136 ++++++++++++-------
 1 file changed, 86 insertions(+), 50 deletions(-)

diff --git a/tests/TestSuite_vhost_event_idx_interrupt.py b/tests/TestSuite_vhost_event_idx_interrupt.py
index af3528d6..e4151c83 100644
--- a/tests/TestSuite_vhost_event_idx_interrupt.py
+++ b/tests/TestSuite_vhost_event_idx_interrupt.py
@@ -199,7 +199,7 @@ class TestVhostEventIdxInterrupt(TestCase):
         start qemus
         """
         for i in range(vm_num):
-            vm_info = VM(self.dut, 'vm%d' % i, 'vhost_event_idx_interrupt')
+            vm_info = VM(self.dut, 'vm%d' % i, 'vm')
             vm_info.load_config()
             vm_params = {}
             vm_params['driver'] = 'vhost-user'
@@ -279,6 +279,50 @@ class TestVhostEventIdxInterrupt(TestCase):
                 session_info[sess_index].send_expect("^c", "#")
                 self.vm_dut[vm_index].close_session(session_info[sess_index])
 
+    def bind_nic_driver(self, ports, driver=""):
+        if driver == "igb_uio":
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                driver = netdev.get_nic_driver()
+                if driver != 'igb_uio':
+                    netdev.bind_driver(driver='igb_uio')
+        else:
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                driver_now = netdev.get_nic_driver()
+                if driver == "":
+                    driver = netdev.default_driver
+                if driver != driver_now:
+                    netdev.bind_driver(driver=driver)
+
+    def get_cbdma_ports_info_and_bind_to_dpdk(self):
+        """
+        get all cbdma ports
+        """
+        self.cbdma_dev_infos = []
+        out = self.dut.send_expect('./usertools/dpdk-devbind.py --status-dev misc', '# ', 30)
+        device_info = out.split('\n')
+        for device in device_info:
+            pci_info = re.search('\s*(0000:\d*:\d*.\d*)', device)
+            if pci_info is not None:
+                # Collect every CBDMA (ioat) device found in the status output.
+                # Ideally only devices on the same NUMA socket as the NIC
+                # should be added here.
+                self.cbdma_dev_infos.append(pci_info.group(1))
+        self.verify(len(self.cbdma_dev_infos) >= self.queues, 'There are not enough cbdma devices to run this suite')
+        if self.queues == 1:
+            self.cbdma_dev_infos = [self.cbdma_dev_infos[0], self.cbdma_dev_infos[-1]]
+        self.used_cbdma = self.cbdma_dev_infos[0:self.queues*self.vm_num]
+
+        self.device_str = ' '.join(self.used_cbdma)
+        self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=%s %s ' % ("igb_uio", self.device_str), '# ', 60)
+
+    def bind_cbdma_device_to_kernel(self):
+        if self.device_str is not None:
+            self.dut.send_expect('modprobe ioatdma', '# ')
+            self.dut.send_expect('./usertools/dpdk-devbind.py -u %s' % self.device_str, '# ', 30)
+            self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=ioatdma  %s' % self.device_str, '# ', 60)
+
     def stop_all_apps(self):
         """
         close all vms
@@ -289,7 +333,7 @@ class TestVhostEventIdxInterrupt(TestCase):
 
     def test_wake_up_split_ring_vhost_user_core_with_event_idx_interrupt(self):
         """
-        wake up vhost-user core with l3fwd-power sample
+        Test Case 1: wake up split ring vhost-user core with event idx interrupt mode
         """
         self.vm_num = 1
         self.queues = 1
@@ -302,7 +346,7 @@ class TestVhostEventIdxInterrupt(TestCase):
 
     def test_wake_up_split_ring_vhost_user_cores_with_event_idx_interrupt_mode_16_queues(self):
         """
-        wake up vhost-user core with l3fwd-power sample when multi queues are enabled
+        Test Case 2: wake up split ring vhost-user cores with event idx interrupt mode 16 queues test
         """
         self.vm_num = 1
         self.queues = 16
@@ -316,7 +360,7 @@ class TestVhostEventIdxInterrupt(TestCase):
 
     def test_wake_up_split_ring_vhost_user_cores_by_multi_virtio_net_in_vms_with_event_idx_interrupt(self):
         """
-        wake up vhost-user cores with l3fwd-power sample and multi VMs
+        Test Case 3: wake up split ring vhost-user cores by multi virtio-net in VMs with event idx interrupt mode
         """
         self.vm_num = 2
         self.queues = 1
@@ -342,7 +386,7 @@ class TestVhostEventIdxInterrupt(TestCase):
 
     def test_wake_up_packed_ring_vhost_user_cores_with_event_idx_interrupt_mode_16_queues(self):
         """
-        wake up vhost-user core with l3fwd-power sample when multi queues are enabled
+        Test Case 5: wake up packed ring vhost-user cores with event idx interrupt mode 16 queues test
         """
         self.vm_num = 1
         self.queues = 16
@@ -356,7 +400,7 @@ class TestVhostEventIdxInterrupt(TestCase):
 
     def test_wake_up_packed_ring_vhost_user_cores_by_multi_virtio_net_in_vms_with_event_idx_interrupt(self):
         """
-        wake up vhost-user cores with l3fwd-power sample and multi VMs
+        Test Case 6: wake up packed ring vhost-user cores by multi virtio-net in VMs with event idx interrupt mode
         """
         self.vm_num = 2
         self.queues = 1
@@ -369,7 +413,7 @@ class TestVhostEventIdxInterrupt(TestCase):
 
     def test_wake_up_split_ring_vhost_user_cores_with_event_idx_interrupt_mode_16_queues_with_cbdma(self):
         """
-        wake up vhost-user cores with l3fwd-power sample and multi VMs
+        Test Case 7: wake up split ring vhost-user cores with event idx interrupt mode and cbdma enabled 16 queues test
         """
         self.vm_num = 1
         self.bind_nic_driver(self.dut_ports)
@@ -385,6 +429,9 @@ class TestVhostEventIdxInterrupt(TestCase):
         self.stop_all_apps()
 
     def test_wake_up_split_ring_vhost_user_cores_by_multi_virtio_net_in_vms_with_event_idx_interrupt_with_cbdma(self):
+        """
+        Test Case 8: wake up split ring vhost-user cores by multi virtio-net in VMs with event idx interrupt mode and cbdma enabled test
+        """
         self.vm_num = 2
         self.bind_nic_driver(self.dut_ports)
         self.queues = 1
@@ -398,44 +445,39 @@ class TestVhostEventIdxInterrupt(TestCase):
         self.send_and_verify()
         self.stop_all_apps()
 
-    def bind_nic_driver(self, ports, driver=""):
-        if driver == "igb_uio":
-            for port in ports:
-                netdev = self.dut.ports_info[port]['port']
-                driver = netdev.get_nic_driver()
-                if driver != 'igb_uio':
-                    netdev.bind_driver(driver='igb_uio')
-        else:
-            for port in ports:
-                netdev = self.dut.ports_info[port]['port']
-                driver_now = netdev.get_nic_driver()
-                if driver == "":
-                    driver = netdev.default_driver
-                if driver != driver_now:
-                    netdev.bind_driver(driver=driver)
-
-    def get_cbdma_ports_info_and_bind_to_dpdk(self):
+    def test_wake_up_packed_ring_vhost_user_cores_with_event_idx_interrupt_mode_16_queues_with_cbdma(self):
         """
-        get all cbdma ports
+        Test Case 9: wake up packed ring vhost-user cores with event idx interrupt mode and cbdma enabled 16 queues test
         """
-        self.cbdma_dev_infos = []
-        out = self.dut.send_expect('./usertools/dpdk-devbind.py --status-dev misc', '# ', 30)
-        device_info = out.split('\n')
-        for device in device_info:
-            pci_info = re.search('\s*(0000:\d*:\d*.\d*)', device)
-            if pci_info is not None:
-                # dev_info = pci_info.group(1)
-                # the numa id of ioat dev, only add the device which
-                # on same socket with nic dev
-                self.cbdma_dev_infos.append(pci_info.group(1))
-        self.verify(len(self.cbdma_dev_infos) >= self.queues, 'There no enough cbdma device to run this suite')
-        if self.queues == 1:
-            self.cbdma_dev_infos=[self.cbdma_dev_infos[0], self.cbdma_dev_infos[-1]]
-        self.used_cbdma = self.cbdma_dev_infos[0:self.queues*self.vm_num]
+        self.vm_num = 1
+        self.bind_nic_driver(self.dut_ports)
+        self.queues = 16
+        self.get_core_mask()
+        self.nopci = False
+        self.get_cbdma_ports_info_and_bind_to_dpdk()
+        self.lanuch_l3fwd_power(cbdma=True)
+        self.start_vms(vm_num=self.vm_num, packed=True)
+        self.relanuch_l3fwd_power(cbdma=True)
+        self.config_virito_net_in_vm()
+        self.send_and_verify()
+        self.stop_all_apps()
 
-        self.device_str = ' '.join(self.used_cbdma)
-        self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=%s %s ' %
-                             ("igb_uio", self.device_str), '# ', 60)
+    def test_wake_up_packed_ring_vhost_user_cores_by_multi_virtio_net_in_vms_with_event_idx_interrupt_with_cbdma(self):
+        """
+        Test Case 10: wake up packed ring vhost-user cores by multi virtio-net in VMs with event idx interrupt mode and cbdma enabled test
+        """
+        self.vm_num = 2
+        self.bind_nic_driver(self.dut_ports)
+        self.queues = 1
+        self.get_core_mask()
+        self.nopci = False
+        self.get_cbdma_ports_info_and_bind_to_dpdk()
+        self.lanuch_l3fwd_power(cbdma=True)
+        self.start_vms(vm_num=self.vm_num, packed=True)
+        self.relanuch_l3fwd_power(cbdma=True)
+        self.config_virito_net_in_vm()
+        self.send_and_verify()
+        self.stop_all_apps()
 
     def tear_down(self):
         """
@@ -445,14 +487,8 @@ class TestVhostEventIdxInterrupt(TestCase):
         self.dut.send_expect(f"killall {self.l3fwdpower_name}", "#")
         self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
         self.bind_cbdma_device_to_kernel()
-        self.bind_nic_driver(self.dut_ports, self.drivername)
-
-    def bind_cbdma_device_to_kernel(self):
-        if self.device_str is not None:
-            self.dut.send_expect('modprobe ioatdma', '# ')
-            self.dut.send_expect('./usertools/dpdk-devbind.py -u %s' % self.device_str, '# ', 30)
-            self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=ioatdma  %s' % self.device_str,
-                                 '# ', 60)
+        if "cbdma" in self.running_case:
+            self.bind_nic_driver(self.dut_ports, self.drivername)
 
     def tear_down_all(self):
         """
-- 
2.25.1

