* [dts] [PATCH V1] tests/port_control: automation port_control
@ 2019-12-31 8:59 Zeng Xiaoxiao
2019-12-31 9:05 ` Zeng, XiaoxiaoX
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Zeng Xiaoxiao @ 2019-12-31 8:59 UTC (permalink / raw)
To: dts; +Cc: Zeng Xiaoxiao
* According to port_control_test_plan.rst, add the automation suite port_control.
Signed-off-by: Zeng Xiaoxiao <xiaoxiaox.zeng@intel.com>
---
tests/TestSuite_port_control.py | 260 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 260 insertions(+)
create mode 100644 tests/TestSuite_port_control.py
diff --git a/tests/TestSuite_port_control.py b/tests/TestSuite_port_control.py
new file mode 100644
index 0000000..a458add
--- /dev/null
+++ b/tests/TestSuite_port_control.py
@@ -0,0 +1,260 @@
+# BSD LICENSE
+#
+# Copyright(c) <2019> Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import utils
+import time
+import re
+import packet
+from test_case import TestCase
+from pmd_output import PmdOutput
+from virt_common import VM
+
+
+class TestPortControl(TestCase):
+
+    def set_up_all(self):
+        """
+        Run before each test suite
+        """
+        # initialize ports topology
+        self.vm0 = None
+        self.env_done = False
+        self.port_id_0 = 0
+        self.pkt_count = 1000
+        self.dut_ports = self.dut.get_ports(self.nic)
+        # Verify that enough ports are available
+        self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
+        self.pf_mac = self.dut.get_mac_address(self.dut_ports[0])
+        self.vf_mac = "00:01:23:45:67:89"
+        self.txitf = self.tester.get_interface(self.tester.get_local_port(self.dut_ports[0]))
+        self.host_testpmd = PmdOutput(self.dut)
+        self.vf_assign_method = 'vfio-pci'
+        self.dut.send_expect('modprobe vfio-pci', '#')
+        self.socket = self.dut.get_numa_id(self.dut_ports[0])
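+        # remember the PF's current kernel driver so it can be restored after the VF tests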
+        port = self.dut.ports_info[0]['port']
+        self.pf_default_driver = port.get_nic_driver()
+
+    def set_up(self):
+        """
+        Run before each test case.
+        """
+        pass
+
+    def setup_vm_env(self, driver='default'):
+        """
+        Create testing environment with 1 VF generated from 1 PF
+        """
+        if self.env_done:
+            return
+
+        # bind to default driver
+        self.bind_nic_driver(self.dut_ports[:1], driver="")
+        self.used_dut_port = self.dut_ports[0]
+        self.host_intf = self.dut.ports_info[self.used_dut_port]['intf']
+        self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver=driver)
+        self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]['vfs_port']
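+        # give VF 0 a fixed MAC through the PF host interface so the tester can address it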
+ self.dut.send_expect("ip link set %s vf 0 mac %s" % (self.host_intf, self.vf_mac), "# ")
+ try:
+ for port in self.sriov_vfs_port:
+ port.bind_driver(self.vf_assign_method)
+ time.sleep(1)
+ vf_popt = {'opt_host': self.sriov_vfs_port[0].pci}
+
+ # set up VM ENV
+ self.vm = VM(self.dut, 'vm0', 'port_control')
+ self.vm.set_vm_device(driver=self.vf_assign_method, **vf_popt)
+ self.vm_dut = self.vm.start()
+ if self.vm_dut is None:
+ raise Exception("Set up VM ENV failed!")
+
+ self.vm_testpmd = PmdOutput(self.vm_dut)
+
+ except Exception as e:
+ self.destroy_vm_env()
+ raise Exception(e)
+
+ self.env_done = True
+
+    def destroy_vm_env(self):
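+        # tear down the VM first, then release the VFs and rebind the PF to its default driver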
+        if getattr(self, 'vm', None):
+            if getattr(self, 'vm_dut', None):
+                self.vm_dut.kill_all()
+            self.vm_testpmd = None
+            self.vm_dut_ports = None
+            # destroy vm0
+            self.vm.stop()
+            self.dut.virt_exit()
+            time.sleep(3)
+            self.vm = None
+
+        if getattr(self, 'used_dut_port', None) is not None:
+            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
+            self.used_dut_port = None
+            self.bind_nic_driver(self.dut_ports[:1], driver=self.pf_default_driver)
+
+        self.env_done = False
+
+    def bind_nic_driver(self, ports, driver=""):
+        # bind ports to vfio-pci/igb_uio, or back to the default kernel driver
+        if driver == "vfio-pci":
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                driver_now = netdev.get_nic_driver()
+                if driver_now != 'vfio-pci':
+                    netdev.bind_driver(driver='vfio-pci')
+
+        elif driver == "igb_uio":
+            # igb_uio should be insmoded by default, no need to check
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                driver_now = netdev.get_nic_driver()
+                if driver_now != 'igb_uio':
+                    netdev.bind_driver(driver='igb_uio')
+        else:
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                driver_now = netdev.get_nic_driver()
+                if not driver:
+                    driver = netdev.default_driver
+                if driver != driver_now:
+                    netdev.bind_driver(driver=driver)
+
+    def start_testpmd(self, terminal):
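+        # launch testpmd on port 0 (local NUMA socket), wait for link up, then forward by MAC with promiscuous mode off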
+        terminal.start_testpmd(ports=[0], socket=self.socket)
+        res = terminal.wait_link_status_up('all', timeout=5)
+        self.verify(res is True, 'some port link status is down')
+        terminal.execute_cmd('set fwd mac')
+        terminal.execute_cmd('set promisc all off')
+
+    def start_pmd_port(self, terminal):
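+        # start all ports and forwarding, then check that port 0 reports link up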
+ terminal.execute_cmd("port start all")
+ terminal.execute_cmd("start")
+ terminal.wait_link_status_up('all', timeout=5)
+ ret = terminal.get_port_link_status(self.port_id_0)
+ self.verify(ret == "up", "port not up!")
+
+    def stop_pmd_port(self, terminal):
+        terminal.execute_cmd("stop")
+        terminal.execute_cmd("port stop all")
+        ret = terminal.get_port_link_status(self.port_id_0)
+        self.verify(ret == "down", "port not down!")
+
+    def reset_pmd_port(self, terminal):
+        terminal.execute_cmd("port reset all")
+        ret = terminal.get_port_link_status(self.port_id_0)
+        self.verify(ret == "down", "port reset failed!")
+
+    def close_pmd_port(self, terminal):
+        terminal.execute_cmd("port close all")
+        ret = terminal.execute_cmd("show port info all")
+        ret = ret.split('\r')
+        self.verify(ret[1] == '', "close all ports failed!")
+
+    def calculate_stats(self, start_stats, end_stats):
+        ret_stats = {}
+        ret_stats['RX-packets'] = int(end_stats['RX-packets']) - int(start_stats['RX-packets'])
+        ret_stats['TX-packets'] = int(end_stats['TX-packets']) - int(start_stats['TX-packets'])
+        return ret_stats
+
+    def send_and_verify_packets(self, terminal):
+        """
+        Send packets according to parameters.
+        """
+        if terminal is self.host_testpmd:
+            self.dts_mac = self.pf_mac
+        else:
+            self.dts_mac = self.vf_mac
+
+        self.pkt = packet.Packet('Ether(dst="%s")/IP()/Raw("x"*40)' % self.dts_mac)
+
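+        # snapshot PMD stats before and after the burst so only packets from this send are counted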
+        pf_start_stats = terminal.get_pmd_stats(self.port_id_0)
+        self.pkt.send_pkt(crb=self.tester, tx_port=self.txitf, count=self.pkt_count, timeout=30)
+        pf_end_stats = terminal.get_pmd_stats(self.port_id_0)
+        pf_ret_stats = self.calculate_stats(pf_start_stats, pf_end_stats)
+
+        self.verify(pf_ret_stats['RX-packets'] == self.pkt_count and pf_ret_stats['TX-packets'] == self.pkt_count,
+                    "Packet receive and forward failed!")
+
+    def test_pf_start_stop_reset_close(self):
+        self.start_testpmd(self.host_testpmd)
+        # start port
+        self.start_pmd_port(self.host_testpmd)
+        self.send_and_verify_packets(self.host_testpmd)
+        # stop port and start port
+        self.stop_pmd_port(self.host_testpmd)
+        self.start_pmd_port(self.host_testpmd)
+        self.send_and_verify_packets(self.host_testpmd)
+        # reset port
+        self.stop_pmd_port(self.host_testpmd)
+        self.reset_pmd_port(self.host_testpmd)
+        self.start_pmd_port(self.host_testpmd)
+        self.send_and_verify_packets(self.host_testpmd)
+        # close all ports
+        self.stop_pmd_port(self.host_testpmd)
+        self.close_pmd_port(self.host_testpmd)
+
+    def test_e1000_start_stop_reset_close(self):
+        self.setup_vm_env()
+        self.start_testpmd(self.vm_testpmd)
+        # start port
+        self.start_pmd_port(self.vm_testpmd)
+        self.send_and_verify_packets(self.vm_testpmd)
+        # stop port and start port
+        self.stop_pmd_port(self.vm_testpmd)
+        self.start_pmd_port(self.vm_testpmd)
+        self.send_and_verify_packets(self.vm_testpmd)
+        # reset port
+        self.stop_pmd_port(self.vm_testpmd)
+        self.reset_pmd_port(self.vm_testpmd)
+        self.start_pmd_port(self.vm_testpmd)
+        self.send_and_verify_packets(self.vm_testpmd)
+        # close all ports
+        self.stop_pmd_port(self.vm_testpmd)
+        self.close_pmd_port(self.vm_testpmd)
+
+    def tear_down(self):
+        """
+        Run after each test case.
+        """
+        if self.env_done:
+            self.vm_testpmd.quit()
+            self.destroy_vm_env()
+        else:
+            self.host_testpmd.quit()
+
+    def tear_down_all(self):
+        """
+        Run after each test suite.
+        """
+        if self.env_done:
+            self.destroy_vm_env()
+        self.dut.kill_all()
--
1.8.3.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [dts] [PATCH V1] tests/port_control: automation port_control
2019-12-31 8:59 [dts] [PATCH V1] tests/port_control: automation port_control Zeng Xiaoxiao
@ 2019-12-31 9:05 ` Zeng, XiaoxiaoX
2019-12-31 9:31 ` Lu, Nannan
2020-01-02 5:16 ` Tu, Lijuan
2 siblings, 0 replies; 4+ messages in thread
From: Zeng, XiaoxiaoX @ 2019-12-31 9:05 UTC (permalink / raw)
To: dts
[-- Attachment #1: Type: text/plain, Size: 12047 bytes --]
Tested-by: Zeng Xiaoxiao <xiaoxiaox.zeng@intel.com>
> -----Original Message-----
> From: Zeng, XiaoxiaoX
> Sent: Tuesday, December 31, 2019 4:59 PM
> To: dts@dpdk.org
> Cc: Zeng, XiaoxiaoX <xiaoxiaox.zeng@intel.com>
> Subject: [dts] [PATCH V1] tests/port_control: automation port_control
>
> * According to port_control_test_plan.rst, add the automation suite port_control.
>
> Signed-off-by: Zeng Xiaoxiao <xiaoxiaox.zeng@intel.com>
[-- Attachment #2: TestPortControl.log --]
[-- Type: application/octet-stream, Size: 40190 bytes --]
31/12/2019 17:03:42 dts:
TEST SUITE : TestPortControl
31/12/2019 17:03:42 dts: NIC : fortville_25g
31/12/2019 17:03:42 dut.10.239.250.18:
31/12/2019 17:03:42 tester:
31/12/2019 17:03:42 dut.10.239.250.18: modprobe vfio-pci
31/12/2019 17:03:42 dut.10.239.250.18:
31/12/2019 17:03:42 TestPortControl: Test Case test_pf_start_stop_reset_close Begin
31/12/2019 17:03:43 dut.10.239.250.18:
31/12/2019 17:03:43 tester:
31/12/2019 17:03:43 dut.10.239.250.18: ./x86_64-native-linuxapp-gcc/app/testpmd -l 1,2 -n 4 -w 0000:86:00.0 --file-prefix=dpdk_85707_20191231170319 -- -i
31/12/2019 17:03:53 dut.10.239.250.18: EAL: Detected 72 lcore(s)
EAL: Detected 2 NUMA nodes
EAL: Multi-process socket /var/run/dpdk/dpdk_85707_20191231170319/mp_socket
EAL: Selected IOVA mode 'PA'
EAL: 1024 hugepages of size 2097152 reserved, but no mounted hugetlbfs found for that size
EAL: Probing VFIO support...
EAL: VFIO support initialized
EAL: PCI device 0000:86:00.0 on NUMA socket 1
EAL: probe driver: 8086:158b net_i40e
Interactive-mode selected
testpmd: create a new mbuf pool <mbuf_pool_socket_0>: n=155456, size=2176, socket=0
testpmd: preferred mempool ops selected: ring_mp_mc
testpmd: create a new mbuf pool <mbuf_pool_socket_1>: n=155456, size=2176, socket=1
testpmd: preferred mempool ops selected: ring_mp_mc
Warning! port-topology=paired and odd forward ports number, the last port will pair with itself.
Configuring Port 0 (socket 1)
Port 0: 3C:FD:FE:B8:97:64
Checking link statuses...
Done
31/12/2019 17:04:03 dut.10.239.250.18: show port info all
31/12/2019 17:04:03 dut.10.239.250.18: show port info all
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: up
Link speed: 25000 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: enabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:03 dut.10.239.250.18: set fwd mac
31/12/2019 17:04:03 dut.10.239.250.18: set fwd mac
Set mac packet forwarding mode
31/12/2019 17:04:03 dut.10.239.250.18: set promisc all off
31/12/2019 17:04:04 dut.10.239.250.18: set promisc all off
31/12/2019 17:04:04 dut.10.239.250.18: port start all
31/12/2019 17:04:04 dut.10.239.250.18: port start all
Port 0 is now not stopped
Please stop the ports first
Done
31/12/2019 17:04:04 dut.10.239.250.18: start
31/12/2019 17:04:04 dut.10.239.250.18: start
mac packet forwarding - ports=1 - cores=1 - streams=1 - NUMA support enabled, MP allocation mode: native
Logical Core 2 (socket 0) forwards packets on 1 streams:
RX P=0/Q=0 (socket 1) -> TX P=0/Q=0 (socket 1) peer=02:00:00:00:00:00
mac packet forwarding packets/burst=32
nb forwarding cores=1 - nb forwarding ports=1
port 0: RX queue number: 1 Tx queue number: 1
Rx offloads=0x0 Tx offloads=0x10000
RX queue: 0
RX desc=256 - RX free threshold=32
RX threshold registers: pthresh=8 hthresh=8 wthresh=0
RX Offloads=0x0
TX queue: 0
TX desc=256 - TX free threshold=32
TX threshold registers: pthresh=32 hthresh=0 wthresh=0
TX offloads=0x10000 - TX RS bit threshold=32
31/12/2019 17:04:04 dut.10.239.250.18: show port info all
31/12/2019 17:04:04 dut.10.239.250.18: show port info all
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: up
Link speed: 25000 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:04 dut.10.239.250.18: show port info 0
31/12/2019 17:04:04 dut.10.239.250.18: show port info 0
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: up
Link speed: 25000 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:04 dut.10.239.250.18: show port stats 0
31/12/2019 17:04:04 dut.10.239.250.18: show port stats 0
######################## NIC statistics for port 0 ########################
RX-packets: 0 RX-missed: 0 RX-bytes: 0
RX-errors: 0
RX-nombuf: 0
TX-packets: 0 TX-errors: 0 TX-bytes: 0
Throughput (since last show)
Rx-pps: 0 Rx-bps: 0
Tx-pps: 0 Tx-bps: 0
############################################################################
31/12/2019 17:04:04 tester: scp -v /home/autoregression/zxx_dts/output/tmp/pcap/scapy_enp134s0f0.pcap1577783044.38 root@10.239.250.12:/tmp/tester/
31/12/2019 17:04:05 tester: scp -v /home/autoregression/zxx_dts/output/tmp/pcap/scapy_enp134s0f0.cmd1577783044.38 root@10.239.250.12:/tmp/tester/
31/12/2019 17:04:07 tester: python /tmp/tester/scapy_enp134s0f0.cmd1577783044.38
31/12/2019 17:04:08 tester: packet ready for sending...
Ether(src='00:00:00:00:00:00', dst='3c:fd:fe:b8:97:64', type=2048)/IP(frag=0, src='127.0.0.1', proto=0, tos=0, dst='127.0.0.1', chksum=31935, len=60, id=1, version=4, flags=0, ihl=5, ttl=64)/Raw(load='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
........................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
Sent 1000 packets.
31/12/2019 17:04:08 dut.10.239.250.18: show port stats 0
31/12/2019 17:04:08 dut.10.239.250.18: show port stats 0
######################## NIC statistics for port 0 ########################
RX-packets: 1000 RX-missed: 0 RX-bytes: 74000
RX-errors: 0
RX-nombuf: 0
TX-packets: 1000 TX-errors: 0 TX-bytes: 74000
Throughput (since last show)
Rx-pps: 225 Rx-bps: 133536
Tx-pps: 225 Tx-bps: 133536
############################################################################
31/12/2019 17:04:08 dut.10.239.250.18: stop
31/12/2019 17:04:08 dut.10.239.250.18: stop
Telling cores to stop...
Waiting for lcores to finish...
---------------------- Forward statistics for port 0 ----------------------
RX-packets: 1000 RX-dropped: 0 RX-total: 1000
TX-packets: 1000 TX-dropped: 0 TX-total: 1000
----------------------------------------------------------------------------
+++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
RX-packets: 1000 RX-dropped: 0 RX-total: 1000
TX-packets: 1000 TX-dropped: 0 TX-total: 1000
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Done.
31/12/2019 17:04:08 dut.10.239.250.18: port stop all
31/12/2019 17:04:09 dut.10.239.250.18: port stop all
Stopping ports...
Checking link statuses...
Done
31/12/2019 17:04:09 dut.10.239.250.18: show port info 0
31/12/2019 17:04:09 dut.10.239.250.18: show port info 0
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: down
Link speed: 0 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:09 dut.10.239.250.18: port start all
31/12/2019 17:04:09 dut.10.239.250.18: port start all
Port 0: 3C:FD:FE:B8:97:64
Checking link statuses...
Done
31/12/2019 17:04:09 dut.10.239.250.18: start
31/12/2019 17:04:09 dut.10.239.250.18: start
mac packet forwarding - ports=1 - cores=1 - streams=1 - NUMA support enabled, MP allocation mode: native
Logical Core 2 (socket 0) forwards packets on 1 streams:
RX P=0/Q=0 (socket 1) -> TX P=0/Q=0 (socket 1) peer=02:00:00:00:00:00
mac packet forwarding packets/burst=32
nb forwarding cores=1 - nb forwarding ports=1
port 0: RX queue number: 1 Tx queue number: 1
Rx offloads=0x0 Tx offloads=0x10000
RX queue: 0
RX desc=256 - RX free threshold=32
RX threshold registers: pthresh=8 hthresh=8 wthresh=0
RX Offloads=0x0
TX queue: 0
TX desc=256 - TX free threshold=32
TX threshold registers: pthresh=32 hthresh=0 wthresh=0
TX offloads=0x10000 - TX RS bit threshold=32
31/12/2019 17:04:09 dut.10.239.250.18: show port info all
31/12/2019 17:04:09 dut.10.239.250.18: show port info all
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: down
Link speed: 0 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:10 dut.10.239.250.18: show port info all
31/12/2019 17:04:10 dut.10.239.250.18: show port info all
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: down
Link speed: 0 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:11 dut.10.239.250.18: show port info all
31/12/2019 17:04:11 dut.10.239.250.18: show port info all
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: up
Link speed: 25000 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:11 dut.10.239.250.18: show port info 0
31/12/2019 17:04:11 dut.10.239.250.18: show port info 0
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: up
Link speed: 25000 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:11 dut.10.239.250.18: show port stats 0
31/12/2019 17:04:11 dut.10.239.250.18: show port stats 0
######################## NIC statistics for port 0 ########################
RX-packets: 1000 RX-missed: 0 RX-bytes: 74000
RX-errors: 0
RX-nombuf: 0
TX-packets: 1000 TX-errors: 0 TX-bytes: 74000
Throughput (since last show)
Rx-pps: 0 Rx-bps: 0
Tx-pps: 0 Tx-bps: 0
############################################################################
31/12/2019 17:04:11 tester: scp -v /home/autoregression/zxx_dts/output/tmp/pcap/scapy_enp134s0f0.pcap1577783051.69 root@10.239.250.12:/tmp/tester/
31/12/2019 17:04:13 tester: scp -v /home/autoregression/zxx_dts/output/tmp/pcap/scapy_enp134s0f0.cmd1577783051.69 root@10.239.250.12:/tmp/tester/
31/12/2019 17:04:14 tester: python /tmp/tester/scapy_enp134s0f0.cmd1577783051.69
31/12/2019 17:04:16 tester: packet ready for sending...
Ether(src='00:00:00:00:00:00', dst='3c:fd:fe:b8:97:64', type=2048)/IP(frag=0, src='127.0.0.1', proto=0, tos=0, dst='127.0.0.1', chksum=31935, len=60, id=1, version=4, flags=0, ihl=5, ttl=64)/Raw(load='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
........................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
Sent 1000 packets.
31/12/2019 17:04:16 dut.10.239.250.18: show port stats 0
31/12/2019 17:04:16 dut.10.239.250.18: show port stats 0
######################## NIC statistics for port 0 ########################
RX-packets: 2000 RX-missed: 0 RX-bytes: 148000
RX-errors: 0
RX-nombuf: 0
TX-packets: 2000 TX-errors: 0 TX-bytes: 148000
Throughput (since last show)
Rx-pps: 215 Rx-bps: 127800
Tx-pps: 215 Tx-bps: 127800
############################################################################
31/12/2019 17:04:16 dut.10.239.250.18: stop
31/12/2019 17:04:16 dut.10.239.250.18: stop
Telling cores to stop...
Waiting for lcores to finish...
---------------------- Forward statistics for port 0 ----------------------
RX-packets: 1000 RX-dropped: 0 RX-total: 1000
TX-packets: 1000 TX-dropped: 0 TX-total: 1000
----------------------------------------------------------------------------
+++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
RX-packets: 1000 RX-dropped: 0 RX-total: 1000
TX-packets: 1000 TX-dropped: 0 TX-total: 1000
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Done.
31/12/2019 17:04:16 dut.10.239.250.18: port stop all
31/12/2019 17:04:16 dut.10.239.250.18: port stop all
Stopping ports...
Checking link statuses...
Done
31/12/2019 17:04:16 dut.10.239.250.18: show port info 0
31/12/2019 17:04:16 dut.10.239.250.18: show port info 0
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: down
Link speed: 0 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:16 dut.10.239.250.18: port reset all
31/12/2019 17:04:16 dut.10.239.250.18: port reset all
Resetting ports...
Device with port_id=0 already stopped
Done
31/12/2019 17:04:16 dut.10.239.250.18: show port info 0
31/12/2019 17:04:16 dut.10.239.250.18: show port info 0
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: down
Link speed: 0 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:16 dut.10.239.250.18: port start all
31/12/2019 17:04:16 dut.10.239.250.18: port start all
Configuring Port 0 (socket 1)
Port 0: 3C:FD:FE:B8:97:64
Checking link statuses...
Done
31/12/2019 17:04:16 dut.10.239.250.18: start
31/12/2019 17:04:17 dut.10.239.250.18: start
mac packet forwarding - ports=1 - cores=1 - streams=1 - NUMA support enabled, MP allocation mode: native
Logical Core 2 (socket 0) forwards packets on 1 streams:
RX P=0/Q=0 (socket 1) -> TX P=0/Q=0 (socket 1) peer=02:00:00:00:00:00
mac packet forwarding packets/burst=32
nb forwarding cores=1 - nb forwarding ports=1
port 0: RX queue number: 1 Tx queue number: 1
Rx offloads=0x0 Tx offloads=0x10000
RX queue: 0
RX desc=256 - RX free threshold=32
RX threshold registers: pthresh=8 hthresh=8 wthresh=0
RX Offloads=0x0
TX queue: 0
TX desc=256 - TX free threshold=32
TX threshold registers: pthresh=32 hthresh=0 wthresh=0
TX offloads=0x10000 - TX RS bit threshold=32
31/12/2019 17:04:17 dut.10.239.250.18: show port info all
31/12/2019 17:04:17 dut.10.239.250.18: show port info all
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: down
Link speed: 0 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:18 dut.10.239.250.18: show port info all
31/12/2019 17:04:18 dut.10.239.250.18: show port info all
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: down
Link speed: 0 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:19 dut.10.239.250.18: show port info all
31/12/2019 17:04:19 dut.10.239.250.18: show port info all
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: up
Link speed: 25000 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:19 dut.10.239.250.18: show port info 0
31/12/2019 17:04:19 dut.10.239.250.18: show port info 0
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: up
Link speed: 25000 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:19 dut.10.239.250.18: show port stats 0
31/12/2019 17:04:19 dut.10.239.250.18: show port stats 0
######################## NIC statistics for port 0 ########################
RX-packets: 0 RX-missed: 0 RX-bytes: 0
RX-errors: 0
RX-nombuf: 0
TX-packets: 0 TX-errors: 0 TX-bytes: 0
Throughput (since last show)
Rx-pps: 0 Rx-bps: 0
Tx-pps: 0 Tx-bps: 0
############################################################################
31/12/2019 17:04:19 tester: scp -v /home/autoregression/zxx_dts/output/tmp/pcap/scapy_enp134s0f0.pcap1577783059.39 root@10.239.250.12:/tmp/tester/
31/12/2019 17:04:20 tester: scp -v /home/autoregression/zxx_dts/output/tmp/pcap/scapy_enp134s0f0.cmd1577783059.39 root@10.239.250.12:/tmp/tester/
31/12/2019 17:04:22 tester: python /tmp/tester/scapy_enp134s0f0.cmd1577783059.39
31/12/2019 17:04:23 tester: packet ready for sending...
Ether(src='00:00:00:00:00:00', dst='3c:fd:fe:b8:97:64', type=2048)/IP(frag=0, src='127.0.0.1', proto=0, tos=0, dst='127.0.0.1', chksum=31935, len=60, id=1, version=4, flags=0, ihl=5, ttl=64)/Raw(load='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
........................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
Sent 1000 packets.
31/12/2019 17:04:23 dut.10.239.250.18: show port stats 0
31/12/2019 17:04:23 dut.10.239.250.18: show port stats 0
######################## NIC statistics for port 0 ########################
RX-packets: 1000 RX-missed: 0 RX-bytes: 74000
RX-errors: 0
RX-nombuf: 0
TX-packets: 1000 TX-errors: 0 TX-bytes: 74000
Throughput (since last show)
Rx-pps: 232 Rx-bps: 137688
Tx-pps: 232 Tx-bps: 137688
############################################################################
31/12/2019 17:04:23 dut.10.239.250.18: stop
31/12/2019 17:04:23 dut.10.239.250.18: stop
Telling cores to stop...
Waiting for lcores to finish...
---------------------- Forward statistics for port 0 ----------------------
RX-packets: 1000 RX-dropped: 0 RX-total: 1000
TX-packets: 1000 TX-dropped: 0 TX-total: 1000
----------------------------------------------------------------------------
+++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
RX-packets: 1000 RX-dropped: 0 RX-total: 1000
TX-packets: 1000 TX-dropped: 0 TX-total: 1000
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Done.
31/12/2019 17:04:23 dut.10.239.250.18: port stop all
31/12/2019 17:04:23 dut.10.239.250.18: port stop all
Stopping ports...
Checking link statuses...
Done
31/12/2019 17:04:23 dut.10.239.250.18: show port info 0
31/12/2019 17:04:23 dut.10.239.250.18: show port info 0
********************* Infos for port 0 *********************
MAC address: 3C:FD:FE:B8:97:64
Device name: 0000:86:00.0
Driver name: net_i40e
Devargs:
Connect to socket: 1
memory allocation on the socket: 1
Link status: down
Link speed: 0 Mbps
Link duplex: full-duplex
MTU: 1500
Promiscuous mode: disabled
Allmulticast mode: disabled
Maximum number of MAC addresses: 64
Maximum number of MAC addresses of hash filtering: 0
VLAN offload:
strip off, filter off, extend off, qinq strip off
Hash key size in bytes: 52
Redirection table size: 512
Supported RSS offload flow types:
ipv4-frag
ipv4-tcp
ipv4-udp
ipv4-sctp
ipv4-other
ipv6-frag
ipv6-tcp
ipv6-udp
ipv6-sctp
ipv6-other
l2_payload
Minimum size of RX buffer: 1024
Maximum configurable length of RX packet: 9728
Maximum configurable size of LRO aggregated packet: 0
Maximum number of VMDq pools: 64
Current number of RX queues: 1
Max possible RX queues: 320
Max possible number of RXDs per queue: 4096
Min possible number of RXDs per queue: 64
RXDs number alignment: 32
Current number of TX queues: 1
Max possible TX queues: 320
Max possible number of TXDs per queue: 4096
Min possible number of TXDs per queue: 64
TXDs number alignment: 32
Max segment number per packet: 255
Max segment number per MTU/TSO: 8
31/12/2019 17:04:23 dut.10.239.250.18: port close all
31/12/2019 17:04:24 dut.10.239.250.18: port close all
Closing ports...
Done
31/12/2019 17:04:24 dut.10.239.250.18: show port info all
31/12/2019 17:04:24 dut.10.239.250.18: show port info all
31/12/2019 17:04:24 TestPortControl: Test Case test_pf_start_stop_reset_close Result PASSED:
31/12/2019 17:04:24 dut.10.239.250.18: quit
31/12/2019 17:04:27 dut.10.239.250.18: quit
Bye...
31/12/2019 17:04:27 dts:
TEST SUITE ENDED: TestPortControl
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [dts] [PATCH V1] tests/port_control: automation port_control
2019-12-31 8:59 [dts] [PATCH V1] tests/port_control: automation port_control Zeng Xiaoxiao
2019-12-31 9:05 ` Zeng, XiaoxiaoX
@ 2019-12-31 9:31 ` Lu, Nannan
2020-01-02 5:16 ` Tu, Lijuan
2 siblings, 0 replies; 4+ messages in thread
From: Lu, Nannan @ 2019-12-31 9:31 UTC (permalink / raw)
To: Zeng, XiaoxiaoX, dts; +Cc: Zeng, XiaoxiaoX
Acked-by: Nannan Lu <nannan.lu@intel.com>
-----Original Message-----
From: dts <dts-bounces@dpdk.org> On Behalf Of Zeng Xiaoxiao
Sent: Tuesday, December 31, 2019 4:59 PM
To: dts@dpdk.org
Cc: Zeng, XiaoxiaoX <xiaoxiaox.zeng@intel.com>
Subject: [dts] [PATCH V1] tests/port_control: automation port_contol
*.according to port_control_test_plan.rst,automation suite port_control
Signed-off-by: Zeng Xiaoxiao <xiaoxiaox.zeng@intel.com>
---
tests/TestSuite_port_control.py | 260 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 260 insertions(+)
create mode 100644 tests/TestSuite_port_control.py
diff --git a/tests/TestSuite_port_control.py b/tests/TestSuite_port_control.py
new file mode 100644
index 0000000..a458add
--- /dev/null
+++ b/tests/TestSuite_port_control.py
@@ -0,0 +1,260 @@
+# BSD LICENSE
+#
+# Copyright(c) <2019> Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import utils
+import time
+import re
+import packet
+from test_case import TestCase
+from pmd_output import PmdOutput
+from virt_common import VM
+
+
+class TestPortControl(TestCase):
+
+ def set_up_all(self):
+ """
+ Run before each test suite
+ """
+ # initialize ports topology
+ self.vm0 = None
+ self.env_done = False
+ self.port_id_0 = 0
+ self.pkt_count = 1000
+ self.dut_ports = self.dut.get_ports(self.nic)
+ # Verify that enough ports are available
+ self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
+ self.pf_mac = self.dut.get_mac_address(self.dut_ports[0])
+ self.vf_mac = "00:01:23:45:67:89"
+ self.txitf = self.tester.get_interface(self.tester.get_local_port(self.dut_ports[0]))
+ self.host_testpmd = PmdOutput(self.dut)
+ self.vf_assign_method = 'vfio-pci'
+ self.dut.send_expect('modprobe vfio-pci', '#')
+ self.socket = self.dut.get_numa_id(self.dut_ports[0])
+ port = self.dut.ports_info[0]['port']
+ self.pf_default_driver = port.get_nic_driver()
+
+ def set_up(self):
+ """
+ Run before each test case.
+ """
+ pass
+
+ def setup_vm_env(self, driver='default'):
+ """
+ Create testing environment with 1VF generated from 1PF
+ """
+ if self.env_done:
+ return
+
+ # bind to default driver
+ self.bind_nic_driver(self.dut_ports[:1], driver="")
+ self.used_dut_port = self.dut_ports[0]
+ self.host_intf = self.dut.ports_info[self.used_dut_port]['intf']
+ self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1, driver=driver)
+ self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]['vfs_port']
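+        # give VF 0 a fixed, well-known MAC so the tester can address test packets to it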
+ self.dut.send_expect("ip link set %s vf 0 mac %s" % (self.host_intf, self.vf_mac), "# ")
+ try:
+ for port in self.sriov_vfs_port:
+ port.bind_driver(self.vf_assign_method)
+ time.sleep(1)
+ vf_popt = {'opt_host': self.sriov_vfs_port[0].pci}
+
+ # set up VM ENV
+ self.vm = VM(self.dut, 'vm0', 'port_control')
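+            # pass the VF through to the guest using the vfio-pci assignment method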
+ self.vm.set_vm_device(driver=self.vf_assign_method, **vf_popt)
+ self.vm_dut = self.vm.start()
+ if self.vm_dut is None:
+ raise Exception("Set up VM ENV failed!")
+
+ self.vm_testpmd = PmdOutput(self.vm_dut)
+
+ except Exception as e:
+ self.destroy_vm_env()
+ raise Exception(e)
+
+ self.env_done = True
+
+ def destroy_vm_env(self):
+ if getattr(self, 'vm', None):
+ if getattr(self, 'vm_dut', None):
+ self.vm_dut.kill_all()
+ self.vm_testpmd = None
+ self.vm_dut_ports = None
+ # destroy vm0
+ self.vm.stop()
+ self.dut.virt_exit()
+ time.sleep(3)
+ self.vm = None
+
+        if getattr(self, 'used_dut_port', None) is not None:
+ self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
+ self.used_dut_port = None
+ self.bind_nic_driver(self.dut_ports[:1],
+ driver=self.pf_default_driver)
+
+ self.env_done = False
+
+ def bind_nic_driver(self, ports, driver=""):
+        # rebind the ports to the requested driver; vfio-pci is already loaded in set_up_all
+        if driver == "vfio-pci":
+            for port in ports:
+                netdev = self.dut.ports_info[port]['port']
+                cur_driver = netdev.get_nic_driver()
+                if cur_driver != 'vfio-pci':
+                    netdev.bind_driver(driver='vfio-pci')
+
+ elif driver == "igb_uio":
+ # igb_uio should insmod as default, no need to check
+ for port in ports:
+ netdev = self.dut.ports_info[port]['port']
+ driver = netdev.get_nic_driver()
+ if driver != 'igb_uio':
+ netdev.bind_driver(driver='igb_uio')
+ else:
+ for port in ports:
+ netdev = self.dut.ports_info[port]['port']
+ driver_now = netdev.get_nic_driver()
+                if not driver:
+ driver = netdev.default_driver
+ if driver != driver_now:
+ netdev.bind_driver(driver=driver)
+
+ def start_testpmd(self, terminal):
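+        # run testpmd on the port's NUMA socket, forward in MAC mode and disable
+        # promiscuous mode so only frames sent to the port's own MAC are processed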
+ terminal.start_testpmd(ports=[0], socket=self.socket)
+ res = terminal.wait_link_status_up('all', timeout=5)
+        self.verify(res is True, 'some port link is down')
+ terminal.execute_cmd('set fwd mac')
+ terminal.execute_cmd('set promisc all off')
+
+ def start_pmd_port(self, terminal):
+ terminal.execute_cmd("port start all")
+ terminal.execute_cmd("start")
+ terminal.wait_link_status_up('all', timeout=5)
+ ret = terminal.get_port_link_status(self.port_id_0)
+ self.verify(ret == "up", "port not up!")
+
+ def stop_pmd_port(self, terminal):
+ terminal.execute_cmd("stop")
+ terminal.execute_cmd("port stop all")
+ ret = terminal.get_port_link_status(self.port_id_0)
+ self.verify(ret == "down", "port not down!")
+
+ def reset_pmd_port(self, terminal):
+ terminal.execute_cmd("port reset all")
+ ret = terminal.get_port_link_status(self.port_id_0)
+        self.verify(ret == "down", "port reset failed!")
+
+ def close_pmd_port(self, terminal):
+ terminal.execute_cmd("port close all")
+ ret = terminal.execute_cmd("show port info all")
+ ret = ret.split('\r')
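+        # once all ports are closed, 'show port info all' is expected to print no
+        # per-port section, so the line right after the echoed command is empty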
+        self.verify(ret[1] == '', "failed to close all ports!")
+
+ def calculate_stats(self, start_stats, end_stats):
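+        # return the RX/TX packet deltas between two testpmd stats snapshots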
+ ret_stats = {}
+ ret_stats['RX-packets'] = int(end_stats['RX-packets']) - int(start_stats['RX-packets'])
+ ret_stats['TX-packets'] = int(end_stats['TX-packets']) - int(start_stats['TX-packets'])
+ return ret_stats
+
+ def send_and_verify_packets(self, terminal):
+ """
+ Send packets according to parameters.
+ """
+ if terminal is self.host_testpmd:
+ self.dts_mac = self.pf_mac
+ else:
+ self.dts_mac = self.vf_mac
+
+ self.pkt = packet.Packet('Ether(dst="%s")/IP()/Raw("x"*40)' %
+ self.dts_mac)
+
+ pf_start_stats = terminal.get_pmd_stats(self.port_id_0)
+ self.pkt.send_pkt(crb=self.tester, tx_port=self.txitf, count=self.pkt_count, timeout=30)
+ pf_end_stats = terminal.get_pmd_stats(self.port_id_0)
+ pf_ret_stats = self.calculate_stats(pf_start_stats,
+ pf_end_stats)
+
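+        # in MAC forwarding mode every packet received on the port is forwarded back
+        # out, so both the RX and TX deltas should equal the number of packets sent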
+ self.verify(pf_ret_stats['RX-packets'] == self.pkt_count and pf_ret_stats['TX-packets'] == self.pkt_count,
+                    "packets were not received and forwarded as expected!")
+
+ def test_pf_start_stop_reset_close(self):
+ self.start_testpmd(self.host_testpmd)
+ # start port
+ self.start_pmd_port(self.host_testpmd)
+ self.send_and_verify_packets(self.host_testpmd)
+ # stop port and start port
+ self.stop_pmd_port(self.host_testpmd)
+ self.start_pmd_port(self.host_testpmd)
+ self.send_and_verify_packets(self.host_testpmd)
+ # reset port
+ self.stop_pmd_port(self.host_testpmd)
+ self.reset_pmd_port(self.host_testpmd)
+ self.start_pmd_port(self.host_testpmd)
+ self.send_and_verify_packets(self.host_testpmd)
+ # close all port
+ self.stop_pmd_port(self.host_testpmd)
+ self.close_pmd_port(self.host_testpmd)
+
+ def test_e1000_start_stop_reset_close(self):
+ self.setup_vm_env()
+ self.start_testpmd(self.vm_testpmd)
+ # start port
+ self.start_pmd_port(self.vm_testpmd)
+ self.send_and_verify_packets(self.vm_testpmd)
+ # stop port and start port
+ self.stop_pmd_port(self.vm_testpmd)
+ self.start_pmd_port(self.vm_testpmd)
+ self.send_and_verify_packets(self.vm_testpmd)
+ # reset port
+ self.stop_pmd_port(self.vm_testpmd)
+ self.reset_pmd_port(self.vm_testpmd)
+ self.start_pmd_port(self.vm_testpmd)
+ self.send_and_verify_packets(self.vm_testpmd)
+ # close all port
+ self.stop_pmd_port(self.vm_testpmd)
+ self.close_pmd_port(self.vm_testpmd)
+
+ def tear_down(self):
+ """
+ Run after each test case.
+ """
+ if self.env_done:
+ self.vm_testpmd.quit()
+ self.destroy_vm_env()
+ else:
+ self.host_testpmd.quit()
+
+ def tear_down_all(self):
+ """
+ Run after each test suite.
+ """
+ if self.env_done:
+ self.destroy_vm_env()
+ self.dut.kill_all()
--
1.8.3.1
* Re: [dts] [PATCH V1] tests/port_control: automation port_contol
2019-12-31 8:59 [dts] [PATCH V1] tests/port_control: automation port_contol Zeng Xiaoxiao
2019-12-31 9:05 ` Zeng, XiaoxiaoX
2019-12-31 9:31 ` Lu, Nannan
@ 2020-01-02 5:16 ` Tu, Lijuan
2 siblings, 0 replies; 4+ messages in thread
From: Tu, Lijuan @ 2020-01-02 5:16 UTC (permalink / raw)
To: Zeng, XiaoxiaoX, dts; +Cc: Zeng, XiaoxiaoX
applied
> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of Zeng Xiaoxiao
> Sent: Tuesday, December 31, 2019 4:59 PM
> To: dts@dpdk.org
> Cc: Zeng, XiaoxiaoX <xiaoxiaox.zeng@intel.com>
> Subject: [dts] [PATCH V1] tests/port_control: automation port_contol
>
> *.according to port_control_test_plan.rst,automation suite port_control
>
> Signed-off-by: Zeng Xiaoxiao <xiaoxiaox.zeng@intel.com>
Thread overview: 4+ messages
2019-12-31 8:59 [dts] [PATCH V1] tests/port_control: automation port_contol Zeng Xiaoxiao
2019-12-31 9:05 ` Zeng, XiaoxiaoX
2019-12-31 9:31 ` Lu, Nannan
2020-01-02 5:16 ` Tu, Lijuan