* [dts] [PATCH V2 1/2] tests/multiprocess_iavf: add new cases according to testplan
From: Song Jiale @ 2022-12-27 17:44 UTC (permalink / raw)
  To: dts; +Cc: Song Jiale

1. add two cases according to the test plan.
2. optimize the script to sort the results that match the regular expression.

Signed-off-by: Song Jiale <songx.jiale@intel.com>
---

v2:
-optimize the check_port_status method.
-sort the results that match the regular expression.
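
A minimal, self-contained sketch of the intended sort (the sample output and
variable values here are illustrative, not the suite's real testpmd output):

    import re

    out = "Client  0 - rx: 9\nClient  1 - rx: 128"
    # findall() returns the captured rx counts as strings; sorting with
    # key=int keeps the order numeric rather than lexicographic
    rx_num = re.findall(r"Client\s+\d\s+-\s+rx:\s+(\d+)", out)
    rx_num.sort(key=int, reverse=True)
    assert [int(n) for n in rx_num] == [128, 9]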

 tests/TestSuite_multiprocess_iavf.py | 100 ++++++++++++++++++++++++++-
 1 file changed, 99 insertions(+), 1 deletion(-)

diff --git a/tests/TestSuite_multiprocess_iavf.py b/tests/TestSuite_multiprocess_iavf.py
index bb8e0c98..dbd660be 100644
--- a/tests/TestSuite_multiprocess_iavf.py
+++ b/tests/TestSuite_multiprocess_iavf.py
@@ -476,7 +476,7 @@ class TestMultiprocessIavf(TestCase):
         )
         pkt_num = kwargs.get("pkt_num")
         res = self.get_pkt_statistic(out, **kwargs)
-        res_num = res["rx-total"]
+        res_num = res["rx-packets"]
         self.verify(
             res_num == pkt_num,
             "fail: got wrong number of packets, expect pakcet number {}, got {}".format(
@@ -804,6 +804,19 @@ class TestMultiprocessIavf(TestCase):
             "some subcases failed, detail as below:{}".format(msg),
         )
 
+    def check_port_status(self, pmd_output, port_id, status=True):
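+        """Verify that the link status of port_id matches status (True means up, False means down)."""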
+        port_status = pmd_output.get_port_link_status(port_id)
+        if status:
+            self.verify(
+                port_status == "up",
+                "The expected link state is up, but the actual status is down",
+            )
+        else:
+            self.verify(
+                port_status == "down",
+                "The expected link state is down, but the actual status is up",
+            )
+
     def test_multiprocess_simple_mpbasicoperation(self):
         """
         Basic operation.
@@ -1068,6 +1081,7 @@ class TestMultiprocessIavf(TestCase):
             r"Port \d+\s+-\s+rx:\s+(?P<rx>\d+)\s+tx:.*PORTS", out, re.DOTALL
         )
         rx_num = re.findall(r"Client\s+\d\s+-\s+rx:\s+(\d+)", res.group(0))
+        rx_num.sort(key=int, reverse=True)  # sort numerically, not lexicographically
         for i in range(proc_num):
             self.verify(
                 int(rx_num[i]) > 0,
@@ -1677,6 +1691,90 @@ class TestMultiprocessIavf(TestCase):
         }
         self.rte_flow(mac_ipv4_symmetric, self.multiprocess_rss_data, **pmd_param)
 
+    def test_multiprocess_port_stop(self):
+        packets = [
+            'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/Raw(load="P"*20)',
+        ]
+        # start testpmd multi-process
+        self.launch_multi_testpmd(
+            proc_type="auto",
+            queue_num=8,
+            process_num=2,
+        )
+        for pmd_output in self.pmd_output_list:
+            pmd_output.execute_cmd("stop")
+        # set primary process port stop
+        self.pmd_output_list[0].execute_cmd("port stop 0")
+        self.pmd_output_list[1].execute_cmd("start")
+        fdir_pro = fdirprocess(
+            self,
+            self.pmd_output_list[1],
+            self.tester_ifaces,
+            rxq=8,
+        )
+        out = self.send_pkt_get_output(fdir_pro, packets, port_id=0, count=1)
+        # Check that no packet was received
+        self.check_pkt_num(out, port_id=0, pkt_num=0)
+        for pmd_output in self.pmd_output_list:
+            pmd_output.quit()
+
+        # start testpmd multi-process
+        self.launch_multi_testpmd(
+            proc_type="auto",
+            queue_num=8,
+            process_num=2,
+        )
+        for pmd_output in self.pmd_output_list:
+            pmd_output.execute_cmd("stop")
+        # set secondary process port stop
+        self.pmd_output_list[1].execute_cmd("port stop 0")
+        self.pmd_output_list[0].execute_cmd("start")
+        fdir_pro = fdirprocess(
+            self,
+            self.pmd_output_list[0],
+            self.tester_ifaces,
+            rxq=8,
+        )
+        out = self.send_pkt_get_output(fdir_pro, packets, port_id=0, count=1)
+        # Check that one packet was received in primary process
+        self.check_pkt_num(out, port_id=0, pkt_num=len(packets))
+
+    def test_multiprocess_port_reset(self):
+        # start testpmd multi-process
+        self.launch_multi_testpmd(
+            proc_type="auto",
+            queue_num=8,
+            process_num=2,
+        )
+        for pmd_output in self.pmd_output_list:
+            pmd_output.execute_cmd("stop")
+            self.check_port_status(pmd_output, port_id=0, status=True)
+        # set primary process port reset
+        self.pmd_output_list[0].execute_cmd("port stop 0")
+        self.pmd_output_list[0].execute_cmd("port reset 0")
+        # Check that link status of port 0 is 'down' in secondary process and primary process
+        self.check_port_status(self.pmd_output_list[0], port_id=0, status=False)
+        self.check_port_status(self.pmd_output_list[1], port_id=0, status=False)
+
+        for pmd_output in self.pmd_output_list:
+            pmd_output.quit()
+
+        # start testpmd multi-process
+        self.launch_multi_testpmd(
+            proc_type="auto",
+            queue_num=8,
+            process_num=2,
+        )
+        for pmd_output in self.pmd_output_list:
+            pmd_output.execute_cmd("stop")
+            self.check_port_status(pmd_output, port_id=0, status=True)
+        # set secondary process port reset
+        self.pmd_output_list[1].execute_cmd("port stop 0")
+        self.pmd_output_list[1].execute_cmd("port reset 0")
+        # Check that link status of port 0 is 'up' in secondary process and primary process
+        self.check_port_status(self.pmd_output_list[0], port_id=0, status=True)
+        self.check_port_status(self.pmd_output_list[1], port_id=0, status=True)
+
     def set_fields(self):
         """set ip protocol field behavior"""
         fields_config = {
-- 
2.25.1



* [dts] [PATCH V2 2/2] test_plans/multiprocess_iavf: add 2 cases
From: Song Jiale @ 2022-12-27 17:44 UTC (permalink / raw)
  To: dts; +Cc: Song Jiale, Jin Ling

In DPDK multi-process, all operations done by a secondary process on the hardware are invalid,
so the actions 'port stop' and 'port reset' only take effect in the primary process.
Add 2 cases to test this.
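
At a glance, the two new cases pair the process that issues 'port stop' with
the process left forwarding and the packet count the forwarder should see (a
hedged summary of the cases below, not DTS code):

    # process issuing 'port stop 0' -> (process forwarding, packets expected)
    cases = {
        "primary":   ("secondary", 0),  # stop in primary takes effect on HW
        "secondary": ("primary",   1),  # stop in secondary is ignored by HW
    }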

Signed-off-by: Jin Ling <jin.ling@intel.com>
---
 test_plans/multiprocess_iavf_test_plan.rst | 142 ++++++++++++++++++++-
 1 file changed, 141 insertions(+), 1 deletion(-)

diff --git a/test_plans/multiprocess_iavf_test_plan.rst b/test_plans/multiprocess_iavf_test_plan.rst
index 2f5ae54a..f28f2042 100644
--- a/test_plans/multiprocess_iavf_test_plan.rst
+++ b/test_plans/multiprocess_iavf_test_plan.rst
@@ -47,6 +47,9 @@ twice - once as a primary instance, and once as a secondary instance. Messages
 are sent from primary to secondary and vice versa, demonstrating the processes
 are sharing memory and can communicate using rte_ring structures.
 
+In DPDK multi-process, all operations done by a secondary process on the hardware are invalid,
+so the actions ``port stop`` and ``port reset`` only take effect in the primary process.
+
 Prerequisites
 -------------
 
@@ -54,9 +57,11 @@ If using vfio the kernel must be >= 3.6+ and VT-d must be enabled in bios.When
 using vfio, use the following commands to load the vfio driver and bind it
 to the device under test::
 
+   echo 1 > /sys/bus/pci/devices/0000:17:00.0/sriov_numvfs
+   ip link set ens9 vf 0 mac 00:11:22:33:44:55
    modprobe vfio
    modprobe vfio-pci
-   usertools/dpdk-devbind.py --bind=vfio-pci device_bus_id
+   usertools/dpdk-devbind.py --bind=vfio-pci {vf_pci}
 
 Assuming that a DPDK build has been set up and the multi-process sample
 applications have been built.
@@ -948,3 +953,138 @@ Test Case: test_multiprocess_negative_exceed_process_num
     the first and second processes should be launched successfully
     the third process should be launched failed and output should contain the following string:
     'multi-process option proc-id(2) should be less than num-procs(2)'
+
+Test Case: test_multiprocess_port_stop
+======================================
+Subcase 1: secondary_port_stop
+------------------------------
+test steps
+~~~~~~~~~~
+
+1. Launch the app ``testpmd``, start 2 processes with the following arguments::
+
+   ./dpdk-testpmd -l 1,2 --proc-type=auto -a 0000:17:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8  --num-procs=2 --proc-id=0
+   ./dpdk-testpmd -l 3,4 --proc-type=auto -a 0000:17:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8  --num-procs=2 --proc-id=1
+
+2. stop port in secondary process and start fwd in primary::
+
+    secondary process:
+      testpmd> port stop 0
+
+    primary process:
+      testpmd> set fwd rxonly
+      testpmd> set verbose 1
+      testpmd> start
+
+3. send 1 packet from scapy::
+
+   >>> sendp([Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/Raw(load="P"*20)], iface="ens6")
+
+expected result
+~~~~~~~~~~~~~~~
+
+Check that one packet was received in primary process::
+
+    primary process:
+      testpmd> stop
+
+      ---------------------- Forward statistics for port 0  ----------------------
+        RX-packets: 1              RX-dropped: 0             RX-total: 1
+        TX-packets: 0              TX-dropped: 0             TX-total: 0
+      ----------------------------------------------------------------------------
+
+Subcase 2: primary_port_stop
+----------------------------
+test steps
+~~~~~~~~~~
+
+1. Launch the app ``testpmd``, start 2 processes with the following arguments::
+
+   ./dpdk-testpmd -l 1,2 --proc-type=auto -a 0000:17:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8  --num-procs=2 --proc-id=0
+   ./dpdk-testpmd -l 3,4 --proc-type=auto -a 0000:17:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8  --num-procs=2 --proc-id=1
+
+2. stop port in primary process and start fwd in secondary::
+
+    primary process:
+      testpmd> port stop 0
+
+    secondary process:
+      testpmd> set fwd rxonly
+      testpmd> set verbose 1
+      testpmd> start
+
+3. send 1 packet from scapy::
+
+   >>> sendp([Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/Raw(load="P"*20)], iface="ens6")
+
+expected result
+~~~~~~~~~~~~~~~
+
+Check that no packet was received in secondary process::
+
+    secondary process:
+      testpmd> stop
+      Telling cores to stop...
+      Waiting for lcores to finish...
+
+      ---------------------- Forward statistics for port 0  ----------------------
+        RX-packets: 0              RX-dropped: 1             RX-total: 1
+        TX-packets: 0              TX-dropped: 0             TX-total: 0
+      ----------------------------------------------------------------------------
+
+
+Test Case: test_multiprocess_port_reset
+=======================================
+Subcase 1: primary_port_reset
+-----------------------------
+test steps
+~~~~~~~~~~
+
+1. Launch the app ``testpmd``, start 2 processes with rxq/txq set as 8 (proc_id:0~1, queue id:0~7) with the following arguments::
+
+    ./dpdk-testpmd -l 1,2 --proc-type=auto -a 0000:17:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=0
+    ./dpdk-testpmd -l 3,4 --proc-type=auto -a 0000:17:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=1
+
+
+2. reset port in primary when secondary is running::
+
+    primary process:
+      testpmd> port stop 0
+      testpmd> port reset 0
+
+
+expected result
+~~~~~~~~~~~~~~~
+
+Check that link status of port 0 is ``down`` in secondary process and primary process::
+
+    testpmd> show port info 0
+
+
+Subcase 2: secondary_port_reset
+-------------------------------
+test steps
+~~~~~~~~~~
+
+1. Launch the app ``testpmd``, start 2 processes with rxq/txq set as 8 (proc_id:0~1, queue id:0~7) with the following arguments::
+
+    ./dpdk-testpmd -l 1,2 --proc-type=auto -a 0000:17:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=0
+    ./dpdk-testpmd -l 3,4 --proc-type=auto -a 0000:17:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=1
+
+2. reset port in secondary when primary is running::
+
+    secondary process:
+      testpmd> port stop 0
+      testpmd> port reset 0
+
+expected result
+~~~~~~~~~~~~~~~
+
+Check that link status of port 0 is ``up`` in primary process and secondary process::
+
+    testpmd> show port info 0
\ No newline at end of file
-- 
2.25.1



* RE: [dts] [PATCH V2 2/2] test_plans/multiprocess_iavf: add 2 cases
From: Ling, Jin @ 2022-12-28  3:28 UTC (permalink / raw)
  To: Jiale, SongX, dts



> -----Original Message-----
> From: Jiale, SongX <songx.jiale@intel.com>
> Sent: 2022年12月28日 1:44
> To: dts@dpdk.org
> Cc: Jiale, SongX <songx.jiale@intel.com>; Ling, Jin <jin.ling@intel.com>
> Subject: [dts] [PATCH V2 2/2] test_plans/multiprocess_iavf: add 2 cases
> 
> in DPDK multiprocess, all operations done by the secondary process on the
> hardware are invalid.
> so action 'port stop' and 'port reset' only work in primary process.
> add 2 cases to test it.
> 
> Signed-off-by: Jin Ling <jin.ling@intel.com>
> ---
>  test_plans/multiprocess_iavf_test_plan.rst | 142 ++++++++++++++++++++-
>  1 file changed, 141 insertions(+), 1 deletion(-)
> 
Acked-by: Jin Ling <jin.ling@intel.com>
