test suite reviews and discussions
* [dts] [PATCH V1] tests: add new suite cvl ecpri
@ 2021-04-02 10:34 Zhou Jun
From: Zhou Jun @ 2021-04-02 10:34 UTC (permalink / raw)
  To: dts; +Cc: Zhou Jun

eCPRI is only supported by the wireless pkg, so split it into a new suite

Signed-off-by: Zhou Jun <junx.w.zhou@intel.com>
---
 tests/TestSuite_cvl_ecpri.py | 898 +++++++++++++++++++++++++++++++++++
 1 file changed, 898 insertions(+)
 create mode 100644 tests/TestSuite_cvl_ecpri.py

diff --git a/tests/TestSuite_cvl_ecpri.py b/tests/TestSuite_cvl_ecpri.py
new file mode 100644
index 00000000..af018ffd
--- /dev/null
+++ b/tests/TestSuite_cvl_ecpri.py
@@ -0,0 +1,898 @@
+import re\r
+from packet import Packet\r
+from pmd_output import PmdOutput\r
+from test_case import TestCase\r
+import rte_flow_common as rfc\r
+import utils\r
+from utils import GREEN, RED\r
+import time\r
+\r
+Mac_list = ['00:11:22:33:44:55', '00:11:22:33:44:11', '00:11:22:33:44:22', '00:11:22:33:44:33']\r
+\r
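+# Packet templates carrying eCPRI messages with different message types/subtypes in the UDP\r
+# payload; once the eCPRI UDP tunnel port is configured, each should be parsed to a distinct ptype.\r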
+pkt_lst = ["Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x00\')",\r
+             "Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x02\')/Raw('x'*11)/Raw(\'\\x00\')",\r
+             "Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x02')/Raw('x'*11)/Raw(\'\\x01')",\r
+             "Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x02\')/Raw('x'*11)/Raw(\'\\x03\')",\r
+             "Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x02\')/Raw('x'*11)/Raw(\'\\x05\')",\r
+             "Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x02\')/Raw('x'*11)/Raw(\'\\x06\')",\r
+             "Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x02\')/Raw('x'*11)/Raw(\'\\x07\')",\r
+             "Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x02\')/Raw('x'*11)/Raw(\'\\x08\')",\r
+             "Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x05\')",\r
+             "Ether(dst='{}')/IP()/UDP(dport={})/Raw(\'\\x10\\x06\')"\r
+             ]\r
+\r
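+# Expected ptypes: 372-381 when the packets are recognized as eCPRI, otherwise every packet\r
+# falls back to the plain IPv4/UDP ptype 24.\r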
+ptype_match_lst = ['ptype=' + str(i) for i in range(372, 382)]\r
+ptype_nomatch_lst = ['ptype=24'] * 10\r
+\r
+# eCPRI over Ethernet header data.\r
+eCPRI_over_Ethernet_rule = "flow create 1 ingress pattern eth / ecpri common type iq_data / end actions rss types ecpri end key_len 0 queues end / end"\r
+over_eth_header_packets = {\r
+    'match': ["Ether(dst='00:11:22:33:44:11', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\x45\')"],\r
+    'unmatched': ["Ether(dst='00:11:22:33:44:11', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\x46\')"]\r
+}\r
+\r
+tv_over_eth_queue_index = {\r
+    "name": "test_eth_queue_index",\r
+    "rule": "flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is 0x2345 / end actions queue index 3 / mark id 1 / end",\r
+    "scapy_str": over_eth_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "queue": 3, "mark_id": 1, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_eth_rss_queues = {\r
+    "name": "test_eth_rss_queues",\r
+    "rule": "flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is 0x2345 / end actions rss queues 5 6 end / mark id 2 / end",\r
+    "scapy_str": over_eth_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "queue": [5, 6], "mark_id": 2, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_eth_drop = {\r
+    "name": "test_eth_drop",\r
+    "rule": "flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is 0x2345 / end actions drop / end",\r
+    "scapy_str": over_eth_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "drop": True, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_eth_passthru = {\r
+    "name": "test_eth_passthru",\r
+    "rule": "flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is 0x2345 / end actions passthru / mark id 1 / end",\r
+    "scapy_str": over_eth_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "rss": True, "mark_id": 1, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_eth_mark_rss = {\r
+    "name": "test_eth_mark_rss",\r
+    "rule": "flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is 0x2345 / end actions mark / rss / end",\r
+    "scapy_str": over_eth_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "mark_id": 0, "rss": True, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_eth_mark = {\r
+    "name": "test_eth_mark",\r
+    "rule": "flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is 0x2345 / end actions mark / end",\r
+    "scapy_str": over_eth_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "mark_id": 0, "rss": True, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+# eCPRI over IP/UDP header data.\r
+eCPRI_over_IP_UDP_rule = "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / end actions rss types ecpri end key_len 0 queues end / end"\r
+over_ip_udp_header_packets = {\r
+    'match': ["Ether(dst='00:11:22:33:44:11')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\x45\')"],\r
+    'unmatched': ["Ether(dst='00:11:22:33:44:11')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\x46\')"]\r
+}\r
+\r
+tv_over_ip_udp_queue_index = {\r
+    "name": "test_ip_udp_queue_index",\r
+    "rule": "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is 0x2345 / end actions queue index 2 / mark / end",\r
+    "scapy_str": over_ip_udp_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "queue": 2, "mark_id": 0, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_ip_udp_rss_queues = {\r
+    "name": "test_ip_udp_rss_queues",\r
+    "rule": "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is 0x2345 / end actions rss queues 5 6 end / mark id 2 / end",\r
+    "scapy_str": over_ip_udp_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "queue": [5, 6], "mark_id": 2, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_ip_udp_drop = {\r
+    "name": "test_ip_udp_drop",\r
+    "rule": "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is 0x2345 / end actions drop / end",\r
+    "scapy_str": over_ip_udp_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "drop": True, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_ip_udp_passthru = {\r
+    "name": "test_ip_udp_passthru",\r
+    "rule": "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is 0x2345 / end actions passthru / mark id 1 / end",\r
+    "scapy_str": over_ip_udp_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "rss": True, "mark_id": 1, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_ip_udp_mark_rss = {\r
+    "name": "test_ip_udp_mark_rss",\r
+    "rule": "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is 0x2345 / end actions mark / rss / end",\r
+    "scapy_str": over_ip_udp_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "mark_id": 0, "rss": True, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_ip_udp_mark = {\r
+    "name": "test_ip_udp_mark",\r
+    "rule": "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is 0x2345 / end actions mark / end",\r
+    "scapy_str": over_ip_udp_header_packets,\r
+    "check_func": rfc.check_mark,\r
+    "check_param": {"port_id": 1, "mark_id": 0, "rss": True, 'rxq': 16},\r
+    "send_port": {"port_id": 0}\r
+}\r
+\r
+tv_over_eth = [tv_over_eth_queue_index, tv_over_eth_rss_queues, tv_over_eth_drop, tv_over_eth_passthru, tv_over_eth_mark_rss, tv_over_eth_mark]\r
+\r
+tv_over_ip_udp = [tv_over_ip_udp_queue_index, tv_over_ip_udp_rss_queues, tv_over_ip_udp_drop, tv_over_ip_udp_passthru, tv_over_ip_udp_mark_rss, tv_over_ip_udp_mark]\r
+\r
+class TestCVLEcpri(TestCase):\r
+    def set_up_all(self):\r
+        """\r
+        Run once at the start of the test suite to set up prerequisites.\r
+        """\r
+        # Based on h/w type, choose how many ports to use\r
+        self.dut_ports = self.dut.get_ports(self.nic)\r
+        self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing")\r
+        # Verify that enough threads are available\r
+        cores = self.dut.get_core_list("1S/4C/1T")\r
+        self.verify(cores is not None, "Insufficient cores for speed testing")\r
+        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])\r
+        self.tester_port0 = self.tester.get_local_port(self.dut_ports[0])\r
+        self.tester_port1 = self.tester.get_local_port(self.dut_ports[1])\r
+        self.tester_iface0 = self.tester.get_interface(self.tester_port0)\r
+        self.tester_iface1 = self.tester.get_interface(self.tester_port1)\r
+\r
+        self.used_dut_port = self.dut_ports[0]\r
+        self.pf_interface = self.dut.ports_info[self.dut_ports[0]]['intf']\r
+        self.file_path = './drivers/net/iavf/iavf_rxtx.c'\r
+        self.compile_dpdk()\r
+        self.vf_flag = False\r
+        self.create_iavf()\r
+\r
+        self.pass_flag = 'passed'\r
+        self.fail_flag = 'failed'\r
+        self.pkt = Packet()\r
+        self.pmd_output = PmdOutput(self.dut)\r
+        self.right_ecpri = '0x5123'\r
+        self.wrong_ecpri = '0x5121'\r
+\r
+    def set_up(self):\r
+        """\r
+        Run before each test case.\r
+        """\r
+        self.launch_testpmd()\r
+        self.pkt = Packet()\r
+\r
+    def create_iavf(self):\r
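+        # Create 4 VFs on the first PF, bind them to the DPDK driver, assign the MACs from\r
+        # Mac_list and set VF0 as trusted so it can act as the DCF.\r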
+        if self.vf_flag is False:\r
+            self.dut.bind_interfaces_linux('ice')\r
+            self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 4)\r
+            self.sriov_vfs_port = self.dut.ports_info[self.used_dut_port]['vfs_port']\r
+            self.vf_flag = True\r
+\r
+            try:\r
+                for i in range(len(self.sriov_vfs_port)):\r
+                    if i != len(self.sriov_vfs_port):\r
+                        self.sriov_vfs_port[i].bind_driver(self.drivername)\r
+                    self.dut.send_expect("ip link set %s vf %s mac %s" % (self.pf_interface, i, Mac_list[i]), "# ")\r
+\r
+                #self.vf0_prop = {'opt_host': self.sriov_vfs_port[0].pci}\r
+                #self.dut.send_expect("ifconfig %s up" % self.pf_interface, "# ")\r
+                self.dut.send_expect("ip link set %s vf 0 trust on" % self.pf_interface, "# ")\r
+            except Exception as e:\r
+                self.destroy_iavf()\r
+                raise Exception(e)\r
+\r
+    def destroy_iavf(self):\r
+        if self.vf_flag is True:\r
+            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)\r
+            self.vf_flag = False\r
+\r
+    def launch_testpmd(self):\r
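+        # Start testpmd with VF0 in DCF mode (cap=dcf) plus two data VFs, using 16 Rx/Tx queues\r
+        # as required by the RSS and queue-group cases, then switch to rxonly verbose forwarding.\r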
+        eal_param = " -a {},cap=dcf -a {} -a {}".format(self.sriov_vfs_port[0].pci, self.sriov_vfs_port[1].pci,\r
+                                                                   self.sriov_vfs_port[2].pci)\r
+        param = " --rxq=16 --txq=16"\r
+        out = self.pmd_output.start_testpmd(cores=[0, 1, 2, 3], eal_param=eal_param, param=param, socket=self.ports_socket)\r
+        # check the VF0 driver is net_ice_dcf\r
+        self.check_dcf_status(out, stats=True)\r
+        self.pmd_output.execute_cmd("set fwd rxonly")\r
+        self.pmd_output.execute_cmd("set verbose 1")\r
+        self.pmd_output.execute_cmd("start")\r
+\r
+    def check_dcf_status(self, out_testpmd, stats=True):\r
+        """\r
+        check if request for DCF is accepted.\r
+        """\r
+        if stats:\r
+            self.verify("Failed to init DCF parent adapter" not in out_testpmd, "request for DCF is rejected.")\r
+            out_portinfo = self.dut.send_expect("show port info 0", "testpmd> ", 15)\r
+            self.verify("net_ice_dcf" in out_portinfo, "request for DCF is rejected.")\r
+        else:\r
+            self.verify("Failed to init DCF parent adapter" in out_testpmd, "request for DCF is accepted.")\r
+            out_portinfo = self.dut.send_expect("show port info 0", "testpmd> ", 15)\r
+            self.verify("net_ice_dcf" not in out_portinfo, "request for DCF is accepted.")\r
+\r
+    def test_add_and_delete_eCPRI_port_config_in_DCF(self):\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri {}".format(self.right_ecpri))\r
+        self.send_and_verify(Mac_list[1], self.right_ecpri, if_match=True)\r
+        self.send_and_verify(Mac_list[1], self.wrong_ecpri, if_match=False)\r
+        self.send_and_verify(Mac_list[2], self.right_ecpri, if_match=True)\r
+        # remove the eCPRI port config and test\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port rm ecpri {}".format(self.right_ecpri))\r
+\r
+        self.send_and_verify(Mac_list[1], self.right_ecpri, if_match=False)\r
+\r
+    def test_eCPRI_port_config_when_DCF_reset(self):\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri {}".format(self.right_ecpri))\r
+        self.pmd_output.execute_cmd("quit", expected="#")\r
+        self.launch_testpmd()\r
+        self.send_and_verify(Mac_list[1], self.right_ecpri, if_match=False)\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri {}".format(self.right_ecpri))\r
+        # use new mac to test\r
+        new_session = self.dut.create_session(name="new_session")\r
+        new_mac = "00:11:22:33:44:66"\r
+        new_session.send_expect("ip link set {} vf 0 mac {}".format(self.pf_interface, new_mac), "#", timeout=10)\r
+        self.send_and_verify(Mac_list[1], self.right_ecpri, if_match=False)\r
+        self.pmd_output.execute_cmd("quit", expected="#")\r
+        # set port vf 0 trust off and test\r
+        self.launch_testpmd()\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri {}".format(self.right_ecpri))\r
+        new_session.send_expect("ip link set {} vf 0 trust off".format(self.pf_interface), "#", timeout=10)\r
+        self.send_and_verify(Mac_list[1], self.right_ecpri, if_match=False)\r
+        new_session.close()\r
+\r
+    def test_DCF_port_config_and_linux_port_config(self):\r
+        new_session = self.dut.create_session(name="new_session")\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri {}".format(self.right_ecpri))\r
+        new_session.send_expect("dmesg -c", "#")\r
+        new_session.send_expect("ip link add vx0 type vxlan id 100 local 1.1.1.1 remote "\r
+                                "2.2.2.2 dev {} dstport 0x1234".format(self.pf_interface), "#")\r
+        new_session.send_expect("ifconfig vx0 up", "#")\r
+        new_session.send_expect("ifconfig vx0 down", "#")\r
+        out = new_session.send_expect("dmesg", "#")\r
+        self.verify("Cannot config tunnel, the capability is used by DCF" in out, "port can be used by another thread!")\r
+        # delete eCPRI port config and test\r
+        new_session.send_expect("dmesg -c", "#")\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port rm ecpri {}".format(self.right_ecpri))\r
+        new_session.send_expect("ifconfig vx0 up", "#")\r
+        new_session.send_expect("ifconfig vx0 down", "# ")\r
+        out = new_session.send_expect("dmesg", "#")\r
+        self.verify("Cannot config tunnel, the capability is used by DCF" not in out, "port can't be used by another thread!")\r
+        self.pmd_output.execute_cmd("quit", "#")\r
+        # do ecpri test\r
+        self.launch_testpmd()\r
+        new_session.send_expect("ip link add vx0 type vxlan id 100 local 1.1.1.1 remote "\r
+                                "2.2.2.2 dev {} dstport 0x1234".format(self.pf_interface), "#")\r
+        new_session.send_expect("ifconfig vx0 up", "#")\r
+        out = self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri {}".format(self.right_ecpri))\r
+        self.verify("ice_dcf_send_aq_cmd(): No response (201 times) or return failure (desc: -63 / buff: -63)" in out,\r
+                    "test fail")\r
+        # set vx0 down and test\r
+        new_session.send_expect("ifconfig vx0 down", "#")\r
+        out = self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri {}".format(self.right_ecpri))\r
+        self.verify("ice_dcf_send_aq_cmd(): No response (201 times) or return failure (desc: -63 / buff: -63)"\r
+                    not in out, "test fail")\r
+        new_session.close()\r
+\r
+    def test_negative_eCPRI_port_config_in_DCF(self):\r
+        ecpri_and_expect_dic = {"1": "Operation not supported",\r
+                                "5": "Invalid port",\r
+                                "15": "Invalid port",\r
+                                "a": "Bad arguments"\r
+                                }\r
+        # set wrong port to test\r
+        for ecpri in ecpri_and_expect_dic.keys():\r
+            out = self.pmd_output.execute_cmd("port config {} udp_tunnel_port add ecpri {}".format(ecpri, self.right_ecpri))\r
+            self.verify(ecpri_and_expect_dic[ecpri] in out, "test fail")\r
+        # set an invalid ecpri to test\r
+        ecpri_and_expect_dic = {"ffff": "Bad arguments",\r
+                                "65536": "Bad arguments"}\r
+\r
+        for ecpri in ecpri_and_expect_dic.keys():\r
+            out = self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri {}".format(ecpri))\r
+            self.verify(ecpri_and_expect_dic[ecpri] in out, "test fail")\r
+            if ecpri == "0":\r
+                # test remove an invalid ecpri\r
+                out = self.pmd_output.execute_cmd("port config 0 udp_tunnel_port rm ecpri {}".format(ecpri))\r
+                self.verify("Operation not permitted" in out, "test fail")\r
+\r
+    def test_rss_for_udp_ecpri(self):\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri {}".format(self.right_ecpri))\r
+        self.pmd_output.execute_cmd("flow validate 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / "\r
+                                    "end actions rss types ecpri end key_len 0 queues end / end")\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / "\r
+                                    "end actions rss types ecpri end key_len 0 queues end / end")\r
+        tag_lst = ['x45', 'x46', 'x47']\r
+        pkt_str = "Ether(dst='{}')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(\r
+            Mac_list[1])\r
+        data_lst = self.get_receive_lst(tag_lst, [pkt_str])\r
+        hash_lst = [i.get('RSS hash') for i in data_lst]\r
+        self.verify(len(set(hash_lst)) == len(tag_lst) == len(set([i.get('queue') for i in data_lst])), "test fail, RSS hash is same.")\r
+        # destroy rule and test\r
+        self.pmd_output.execute_cmd("flow destroy 1 rule 0")\r
+        out = self.pmd_output.execute_cmd("flow list 1")\r
+        data_lst = self.get_receive_lst(tag_lst, [pkt_str], stats=False)\r
+        hash_lst = [i.get('RSS hash') for i in data_lst]\r
+        self.verify(len(hash_lst) == 0 or len(set(hash_lst)) == 1, "test fail, rule still worked.")\r
+\r
+    def test_rss_for_eth_ecpri(self):\r
+        self.dut.send_expect("quit", "# ")\r
+        eal_param = " -a {} -a {}".format(self.sriov_vfs_port[0].pci, self.sriov_vfs_port[1].pci)\r
+        param = " --rxq=16 --txq=16"\r
+        self.pmd_output.start_testpmd(cores=[0, 1, 2, 3], eal_param=eal_param, param=param, socket=self.ports_socket)\r
+        self.pmd_output.execute_cmd("set fwd rxonly")\r
+        self.pmd_output.execute_cmd("set verbose 1")\r
+        self.pmd_output.execute_cmd("start")\r
+        self.pmd_output.execute_cmd("flow validate 1 ingress pattern eth / ecpri common type iq_data / "\r
+                                    "end actions rss types ecpri end key_len 0 queues end / end")\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ecpri common type iq_data / end actions "\r
+                                    "rss types ecpri end key_len 0 queues end / end")\r
+        tag_lst = ['x45', 'x46', 'x47']\r
+        pkt_str = "Ether(dst='{}', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[1])\r
+        data_lst = self.get_receive_lst(tag_lst, [pkt_str])\r
+        hash_lst = [i.get('RSS hash') for i in data_lst]\r
+        self.verify(len(set(hash_lst)) == len(tag_lst), "test fail, RSS hash is same.")\r
+        # destroy rule and test\r
+        self.pmd_output.execute_cmd("flow destroy 1 rule 0")\r
+        self.pmd_output.execute_cmd("flow list 1")\r
+        data_lst = self.get_receive_lst(tag_lst, [pkt_str], stats=False)\r
+        hash_lst = [i.get('RSS hash') for i in data_lst]\r
+        self.verify(len(hash_lst) == 0 or len(set(hash_lst)) == 1, "test fail, rule still worked.")\r
+\r
+    def test_rss_multirules_multiports(self):\r
+        dst_mac_lst = Mac_list[1:3]\r
+        tag_lst = ['x45', 'x46']\r
+        module_pkt_lst = ["Ether(dst='{}')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')",\r
+                   "Ether(dst='{}', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')"]\r
+        rule_lst = ["flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / end actions rss "\r
+                    "types ecpri end key_len 0 queues end / end",\r
+                    "flow create 1 ingress pattern eth / ecpri common type iq_data / end actions rss types ecpri end "\r
+                    "key_len 0 queues end / end",\r
+                    "flow create 2 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / end actions rss types"\r
+                    " ecpri end key_len 0 queues end / end",\r
+                    "flow create 2 ingress pattern eth / ecpri common type iq_data / end actions rss types ecpri end "\r
+                    "key_len 0 queues end / end"]\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        for rule in rule_lst:\r
+            self.pmd_output.execute_cmd(rule)\r
+        out_data = {}\r
+        for dst_mac in dst_mac_lst:\r
+            pkt_lst = [pkt.format(dst_mac) for pkt in module_pkt_lst]\r
+            reta_line = self.get_receive_lst(tag_lst, pkt_lst)\r
+            out_data.setdefault(dst_mac, reta_line)\r
+        # verify\r
+        for key in out_data.keys():\r
+            hash_lst = [i.get('RSS hash') for i in out_data[key]]\r
+            self.verify(len(set(hash_lst)) == 2 and None not in hash_lst, 'test fail, RSS hash is same.')\r
+\r
+        # destroy rule to test\r
+        self.pmd_output.execute_cmd("flow destroy 1 rule 0")\r
+        self.pmd_output.execute_cmd("flow destroy 1 rule 1")\r
+        self.pmd_output.execute_cmd("flow list 1")\r
+        self.pmd_output.execute_cmd("flow destroy 2 rule 0")\r
+        self.pmd_output.execute_cmd("flow destroy 2 rule 1")\r
+        self.pmd_output.execute_cmd("flow list 2")\r
+        out_data = {}\r
+        for dst_mac in dst_mac_lst:\r
+            pkt_lst = [pkt.format(dst_mac) for pkt in module_pkt_lst]\r
+            reta_line = self.get_receive_lst(tag_lst[:1], pkt_lst, stats=False)\r
+            out_data.setdefault(dst_mac, reta_line)\r
+        # verify\r
+        for key in out_data.keys():\r
+            hash_lst = [i.get('RSS hash') for i in out_data[key]]\r
+            self.verify(len(set(hash_lst)) == 1, 'test fail, rule still worked.')\r
+\r
+    def test_rss_without_or_with_udp_port_set_for_udp_ecpri_rule(self):\r
+        tag_lst = ['x45', 'x46', 'x47', 'x48']\r
+        pkt = "Ether(dst='{}')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[1])\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / end "\r
+                               "actions rss types ecpri end key_len 0 queues end / end")\r
+        out_data = self.get_receive_lst(tag_lst, [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 1, 'test fail, rule worked!')\r
+        # set ecpri and test\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        out_data = self.get_receive_lst(tag_lst[:2], [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 2 and None not in hash_lst, 'test fail, rule did not work!')\r
+\r
+    def test_DCF_reset_for_udp_ecpri_rss(self):\r
+        tag_lst = ['x45', 'x46', 'x47']\r
+        pkt = "Ether(dst='{}')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[1])\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / end "\r
+                               "actions rss types ecpri end key_len 0 queues end / end")\r
+        out_data = self.get_receive_lst(tag_lst[:2], [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 2 and None not in hash_lst, 'test fail, RSS hash is same')\r
+\r
+        new_session = self.dut.create_session(name="new_session")\r
+        new_session.send_expect("ip link set {} vf 0 mac 00:11:22:33:44:66".format(self.pf_interface), "#")\r
+        out_data = self.get_receive_lst(tag_lst, [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 1, 'test fail, RSS hash is not same')\r
+\r
+        # restart testpmd and test\r
+        new_mac = "00:11:22:33:44:55"\r
+        new_session.send_expect("ip link set {} vf 0 mac {}".format(self.pf_interface, new_mac), "#")\r
+        self.dut.send_expect("quit", "# ")\r
+        self.launch_testpmd()\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / end "\r
+                               "actions rss types ecpri end key_len 0 queues end / end")\r
+        out_data = self.get_receive_lst(tag_lst[:2], [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 2 and None not in hash_lst, 'test fail, RSS hash is same')\r
+\r
+        new_session.send_expect("ip link set {} vf 0 mac 00:11:22:33:44:66".format(self.pf_interface), "#")\r
+        out_data = self.get_receive_lst(tag_lst, [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 1, 'test fail, RSS hash is not same')\r
+\r
+        self.dut.send_expect("quit", "# ")\r
+        self.launch_testpmd()\r
+        new_session.send_expect("ip link set {} vf 0 trust off".format(self.pf_interface), "#")\r
+        out_data = self.get_receive_lst(tag_lst, [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 1, 'test fail, RSS hash is not same')\r
+        new_session.send_expect("ip link set {} vf 0 trust on".format(self.pf_interface), "#")\r
+        new_session.close()\r
+\r
+    def test_DCF_reset_for_eth_ecpri_rss(self):\r
+        tag_lst = ['x45', 'x46', 'x47', 'x48']\r
+        pkt = "Ether(dst='{}', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')"\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ecpri common type iq_data / end actions rss types"\r
+                                    " ecpri end key_len 0 queues end / end")\r
+\r
+        out_data = self.get_receive_lst(tag_lst[:2], [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 2 and None not in hash_lst, 'test fail, RSS hash is same')\r
+        new_session = self.dut.create_session(name="new_session")\r
+        new_session.send_expect("ip link set {} vf 0 mac 00:11:22:33:44:66".format(self.pf_interface), "#")\r
+        out_data = self.get_receive_lst(tag_lst[1:], [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 3 and None not in hash_lst, 'test fail, RSS hash is same')\r
+\r
+        new_session.send_expect("ip link set {} vf 0 trust off".format(self.pf_interface), "#")\r
+        out_data = self.get_receive_lst(tag_lst[:2], [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 2 and None not in hash_lst, 'test fail, RSS hash is same')\r
+\r
+        new_session.send_expect("ip link set {} vf 0 mac 00:11:22:33:44:66".format(self.pf_interface), "#")\r
+        out_data = self.get_receive_lst(tag_lst[1:], [pkt])\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 3 and None not in hash_lst, 'test fail, RSS hash is same')\r
+\r
+        new_session.send_expect("ip link set {} vf 0 trust on".format(self.pf_interface), "#")\r
+        new_session.send_expect("ip link set {} vf 0 mac 00:11:22:33:44:55".format(self.pf_interface), "#")\r
+        new_session.close()\r
+\r
+    def test_DCF_exit_for_eth_ecpri_and_udp_ecpri_rss(self):\r
+        self.dut.send_expect("quit", "# ")\r
+        eal_param = " -a {},cap=dcf".format(self.sriov_vfs_port[0].pci)\r
+        self.pmd_output.start_testpmd(cores=list(range(8)), eal_param=eal_param, prefix="test1", socket=self.ports_socket)\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        new_session = self.dut.create_session(name="new_session")\r
+        pmd_output1 = PmdOutput(self.dut, new_session)\r
+        eal_param1 = " -a {} -a {}".format(self.sriov_vfs_port[1].pci, self.sriov_vfs_port[2].pci)\r
+        param = " --rxq=16 --txq=16"\r
+        pmd_output1.start_testpmd(cores=list(range(8)), eal_param=eal_param1, param=param, prefix="test2",\r
+                                  socket=self.ports_socket)\r
+        pmd_output1.execute_cmd("flow create 0 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / end "\r
+                                "actions rss types ecpri end key_len 0 queues end / end")\r
+        pmd_output1.execute_cmd("flow create 1 ingress pattern eth / ecpri common type iq_data / end actions rss "\r
+                                "types ecpri end key_len 0 queues end / end")\r
+        pmd_output1.execute_cmd("set verbose 1")\r
+        pmd_output1.execute_cmd("set fwd rxonly")\r
+        pmd_output1.execute_cmd("start")\r
+        tag_lst = ['x45', 'x46']\r
+        pkt_lst = ["Ether(dst='{}')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s')".format(Mac_list[1]),\r
+                   "Ether(dst='{}', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[2])\r
+                   ]\r
+        out_data = self.get_receive_lst(tag_lst, pkt_lst, pmd_output1)\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(len(set(hash_lst)) == 4 and None not in hash_lst, 'test fail, RSS hash is same.')\r
+        self.pmd_output.execute_cmd("quit", "#")\r
+        out_data = self.get_receive_lst(tag_lst, pkt_lst, pmd_output1)\r
+        # verify\r
+        hash_lst = [i.get('RSS hash') for i in out_data]\r
+        self.verify(hash_lst[0] == hash_lst[2] and hash_lst[1] != hash_lst[3], 'test fail, hash value is wrong.')\r
+        pmd_output1.execute_cmd("quit", '#')\r
+        new_session.close()\r
+\r
+    def create_fdir_rule(self, rule: (list, str), check_stats=None, msg=None, validate=True):\r
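+        # Create one or more flow rules (optionally validating them first) and return the rule IDs\r
+        # parsed from "Flow rule #<id> created"; False is recorded for any rule that failed.\r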
+        if validate:\r
+            if isinstance(rule, list):\r
+                validate_rule = [i.replace('create', 'validate') for i in rule]\r
+            else:\r
+                validate_rule = rule.replace('create', 'validate')\r
+            self.validate_fdir_rule(validate_rule, check_stats=check_stats)\r
+        p = re.compile(r"Flow rule #(\d+) created")\r
+        rule_list = []\r
+        if isinstance(rule, list):\r
+            for i in rule:\r
+                out = self.pmd_output.execute_cmd(i, timeout=1)\r
+                if msg:\r
+                    self.verify(msg in out, "failed: expect %s in %s" % (msg, out))\r
+                m = p.search(out)\r
+                if m:\r
+                    rule_list.append(m.group(1))\r
+                else:\r
+                    rule_list.append(False)\r
+        elif isinstance(rule, str):\r
+            out = self.pmd_output.execute_cmd(rule, timeout=1)\r
+            if msg:\r
+                self.verify(msg in out, "failed: expect %s in %s" % (msg, out))\r
+            m = p.search(out)\r
+            if m:\r
+                rule_list.append(m.group(1))\r
+            else:\r
+                rule_list.append(False)\r
+        else:\r
+            raise Exception("unsupported rule type, only accept list or str")\r
+        if check_stats:\r
+            self.verify(all(rule_list), "some rules create failed, result %s" % rule_list)\r
+        elif check_stats == False:\r
+            self.verify(not any(rule_list), "all rules should create failed, result %s" % rule_list)\r
+        return rule_list\r
+\r
+    def validate_fdir_rule(self, rule, check_stats=True, check_msg=None):\r
+        flag = 'Flow rule validated'\r
+        if isinstance(rule, str):\r
+            out = self.pmd_output.execute_cmd(rule, timeout=1)\r
+            if check_stats:\r
+                self.verify(flag in out.strip(), "rule %s validated failed, result %s" % (rule, out))\r
+            else:\r
+                if check_msg:\r
+                    self.verify(flag not in out.strip() and check_msg in out.strip(),\r
+                                "rule %s validate should failed with msg: %s, but result %s" % (rule, check_msg, out))\r
+                else:\r
+                    self.verify(flag not in out.strip(), "rule %s validate should failed, result %s" % (rule, out))\r
+        elif isinstance(rule, list):\r
+            for r in rule:\r
+                out = self.pmd_output.execute_cmd(r, timeout=1)\r
+                if check_stats:\r
+                    self.verify(flag in out.strip(), "rule %s validated failed, result %s" % (r, out))\r
+                else:\r
+                    if not check_msg:\r
+                        self.verify(flag not in out.strip(), "rule %s validate should failed, result %s" % (r, out))\r
+                    else:\r
+                        self.verify(flag not in out.strip() and check_msg in out.strip(),\r
+                                    "rule %s should validate failed with msg: %s, but result %s" % (\r
+                                        r, check_msg, out))\r
+\r
+    def check_fdir_rule(self, port_id=0, stats=True, rule_list=None):\r
+        out = self.pmd_output.execute_cmd("flow list %s" % port_id)\r
+        p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule")\r
+        matched = p.search(out)\r
+        if stats:\r
+            self.verify(matched, "flow rule on port %s does not exist" % port_id)\r
+            if rule_list:\r
+                p = re.compile(r"^(\d+)\s")\r
+                li = out.splitlines()\r
+                res = list(filter(bool, list(map(p.match, li))))\r
+                result = [i.group(1) for i in res]\r
+                self.verify(sorted(result) == sorted(rule_list),\r
+                            "check rule list failed. expect %s, result %s" % (rule_list, result))\r
+        else:\r
+            if rule_list:\r
+                p = re.compile(r"^(\d+)\s")\r
+                li = out.splitlines()\r
+                res = list(filter(bool, list(map(p.match, li))))\r
+                result = [i.group(1) for i in res]\r
+                self.verify(not [i for i in rule_list if i in result],\r
+                            "check rule list failed. flow rule %s on port %s still exists" % (rule_list, port_id))\r
+            else:\r
+                self.verify(not matched, "flow rule on port %s still exists" % port_id)\r
+\r
+    def destroy_fdir_rule(self, port_id=0, rule_id=None):\r
+        if rule_id is None:\r
+            rule_id = 0\r
+        if isinstance(rule_id, list):\r
+            for i in rule_id:\r
+                out = self.dut.send_command("flow destroy %s rule %s" % (port_id, i), timeout=1)\r
+                p = re.compile(r"Flow rule #(\d+) destroyed")\r
+                m = p.search(out)\r
+                self.verify(m, "flow rule %s delete failed" % rule_id)\r
+        else:\r
+            out = self.dut.send_command("flow destroy %s rule %s" % (port_id, rule_id), timeout=1)\r
+            p = re.compile(r"Flow rule #(\d+) destroyed")\r
+            m = p.search(out)\r
+            self.verify(m, "flow rule %s delete failed" % rule_id)\r
+\r
+    def send_packets(self, packets, tx_port=None, count=1):\r
+        self.pkt.update_pkt(packets)\r
+        tx_port = self.tester_iface0 if not tx_port else tx_port\r
+        self.pkt.send_pkt(crb=self.tester, tx_port=tx_port, count=count)\r
+\r
+    def send_pkts_getouput(self, pkts, port_id=0, count=1, drop=False):\r
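+        # Send packets from the tester interface facing the given port and return the testpmd\r
+        # output; for drop rules, port stats are cleared first and the "stop" output is checked.\r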
+        tx_port = self.tester_iface0 if port_id == 0 else self.tester_iface1\r
+\r
+        time.sleep(1)\r
+        if drop:\r
+            self.pmd_output.execute_cmd("clear port stats all")\r
+            time.sleep(0.5)\r
+            self.send_packets(pkts, tx_port=tx_port, count=count)\r
+            out = self.pmd_output.execute_cmd("stop")\r
+            self.pmd_output.execute_cmd("start")\r
+        else:\r
+            self.send_packets(pkts, tx_port=tx_port, count=count)\r
+            out = self.pmd_output.get_output()\r
+        return out\r
+\r
+    def _rte_flow_validate(self, vectors):\r
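+        # Generic rte_flow check: for each test vector, create the rule, send matched and\r
+        # unmatched packets and verify the action (queue/RSS/drop/mark), then destroy the rule\r
+        # and confirm matched packets are no longer steered and the rule is gone from "flow list".\r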
+        test_results = {}\r
+        for tv in vectors:\r
+            try:\r
+                count = 1\r
+                port_id = tv["send_port"]["port_id"] if tv["send_port"].get("port_id") is not None else 0\r
+                dut_port_id = tv["check_param"]["port_id"] if tv["check_param"].get("port_id") is not None else 0\r
+                drop = tv["check_param"].get("drop")\r
+                # create rule\r
+                rule_li = self.create_fdir_rule(tv["rule"], check_stats=True)\r
+                # send and check match packets\r
+                out1 = self.send_pkts_getouput(pkts=tv["scapy_str"]["match"], port_id=port_id,\r
+                                               count=count, drop=drop)\r
+                matched_queue = tv["check_func"](out1, pkt_num=len(tv["scapy_str"]["match"]),\r
+                                                 check_param=tv["check_param"])\r
+                # send and check unmatched packets\r
+                out2 = self.send_pkts_getouput(pkts=tv["scapy_str"]["unmatched"], port_id=port_id,\r
+                                               count=count, drop=drop)\r
+                tv["check_func"](out2, pkt_num=len(tv["scapy_str"]["unmatched"]), check_param=tv["check_param"],\r
+                                 stats=False)\r
+                # list and destroy rule\r
+                self.check_fdir_rule(port_id=tv["check_param"]["port_id"], rule_list=['0'] + rule_li)\r
+                self.destroy_fdir_rule(rule_id=rule_li, port_id=dut_port_id)\r
+                # send matched packet\r
+                out3 = self.send_pkts_getouput(pkts=tv["scapy_str"]["match"], port_id=port_id,\r
+                                               count=count, drop=drop)\r
+                matched_queue2 = tv["check_func"](out3, pkt_num=len(tv["scapy_str"]["match"]),\r
+                                                  check_param=tv["check_param"],\r
+                                                  stats=False)\r
+                if tv["check_param"].get("rss"):\r
+                    self.verify(matched_queue == matched_queue2 and None not in matched_queue,\r
+                                "send twice matched packet, received in different queues")\r
+                # check the rule no longer exists\r
+                self.check_fdir_rule(port_id=dut_port_id, rule_list=rule_li, stats=False)\r
+                test_results[tv["name"]] = True\r
+                self.logger.info((GREEN("case passed: %s" % tv["name"])))\r
+            except Exception as e:\r
+                self.logger.warning((RED(e)))\r
+                self.dut.send_command("flow flush 0", timeout=1)\r
+                self.dut.send_command("flow flush 1", timeout=1)\r
+                test_results[tv["name"]] = False\r
+                self.logger.info((RED("case failed: %s" % tv["name"])))\r
+                continue\r
+        failed_cases = []\r
+        for k, v in list(test_results.items()):\r
+            if not v:\r
+                failed_cases.append(k)\r
+        self.verify(all(test_results.values()), "{} failed.".format(failed_cases))\r
+\r
+    def test_eCPRI_over_Ethernet_header_pattern_fdir(self):\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        self.create_fdir_rule(rule=eCPRI_over_Ethernet_rule, check_stats=True)\r
+        self._rte_flow_validate(tv_over_eth)\r
+\r
+    def test_eCPRI_over_IP_or_UDP_header_pattern_fdir(self):\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        self.create_fdir_rule(rule=eCPRI_over_IP_UDP_rule, check_stats=True)\r
+        self._rte_flow_validate(tv_over_ip_udp)\r
+\r
+    def test_ecpri_fdir_multirules(self):\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        rule_lst = ["flow create 1 ingress pattern eth / ecpri common type iq_data / end actions rss types ecpri end "\r
+                    "key_len 0 queues end / end",\r
+                    "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / end actions rss "\r
+                    "types ecpri end key_len 0 queues end / end",\r
+                    "flow create 2 ingress pattern eth / ecpri common type iq_data / end actions rss types ecpri end "\r
+                    "key_len 0 queues end / end",\r
+                    "flow create 2 ingress pattern eth / ipv4 / udp / ecpri common type iq_data / end actions rss "\r
+                    "types ecpri end key_len 0 queues end / end",\r
+                    "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is 0x2345 / end "\r
+                    "actions rss queues 5 6 end / mark id 0 / end",\r
+                    "flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is 0x2346 / end "\r
+                    "actions passthru / mark id 1 / end",\r
+                    "flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is 0x2345 / end actions "\r
+                    "drop / end",\r
+                    "flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is 0x2346 / end actions "\r
+                    "queue index 1 / mark id 2 / end",\r
+                    "flow create 2 ingress pattern eth / ecpri common type iq_data pc_id is 0x2346 / end actions "\r
+                    "mark id 3 / end",\r
+                    "flow create 2 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is 0x2346 / end "\r
+                    "actions mark / rss / end"]\r
+        for rule in rule_lst:\r
+            self.pmd_output.execute_cmd(rule)\r
+        tag_lst = ['x45', 'x46']\r
+        module_pkt_lst = ["Ether(dst='{}')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')",\r
+                          "Ether(dst='{}', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')"]\r
+        pkt_lst = ["Ether(dst='{}')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[1])]\r
+        data_lst = self.get_receive_lst(tag_lst[: 1], pkt_lst, stats=False)\r
+        queue = [data.get('queue') for data in data_lst]\r
+        self.verify([i for i in queue if i in ['5', '6']], 'pkt went to the wrong queue!')\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == ['0x0'], 'pkt has wrong mark id!')\r
+        data_lst = self.get_receive_lst(tag_lst[1: ], pkt_lst)\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == ['0x1'], 'pkt has wrong mark id!')\r
+        pkt_lst = ["Ether(dst='{}', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[1])]\r
+        data_lst = self.get_receive_lst(tag_lst, pkt_lst, stats=False)\r
+        self.verify([data.get('queue') for data in data_lst] == [None, '1'], 'pkt went to the wrong queue!')\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == [None, '0x2'], 'pkt has wrong mark id!')\r
+        pkt_lst = [pkt.format(Mac_list[2]) for pkt in module_pkt_lst]\r
+        data_lst = self.get_receive_lst(tag_lst, pkt_lst)\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == [None, None, '0x0', '0x3'], 'pkt has wrong mark id!') \r
+\r
+    def test_ecpri_fdir_negative_case(self):\r
+        out = self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id is "\r
+                               "0x2345 / end actions rss queues 5 6 end / mark id 0 / end")\r
+        self.verify("Failed to create parser engine.: Invalid argument" in out, "test fail, the bad rule was created successfully.")\r
+        out = self.pmd_output.execute_cmd("flow list 1")\r
+        r = r'flow list 1(\s+)(.*)'\r
+        m = re.match(r, out)\r
+        self.verify(m.group(2) == '', 'the bad rule was created successfully!')\r
+\r
+    def test_ecpri_fdir_when_DCF_reset(self):\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id "\r
+                                    "is 0x2345 / end actions queue index 1 / mark id 1 / end")\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is "\r
+                                    "0x2345 / end actions queue index 2 / mark id 2 / end")\r
+        pkt_lst = ["Ether(dst='{}')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[1]),\r
+                   "Ether(dst='{}', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[1])]\r
+        tag_lst = ['x45']\r
+        data_lst = self.get_receive_lst(tag_lst, pkt_lst)\r
+        # verify\r
+        self.verify([data.get('queue') for data in data_lst] == ['1', '2'], "pkt to the wrong queue!")\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == ['0x1', '0x2'], "pkt with wrong FDIR matched ID!")\r
+        new_session = self.dut.create_session(name="new_session")\r
+        new_session.send_expect('ip link set {} vf 0 mac 00:11:22:33:44:66'.format(self.pf_interface), '#')\r
+        data_lst = self.get_receive_lst(tag_lst, pkt_lst)\r
+        # verify\r
+        self.verify(data_lst[1].get('queue') == '2', "pkt to the wrong queue!")\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == [None, '0x2'], "pkt with wrong FDIR matched ID!")\r
+        self.dut.send_expect("quit", "#")\r
+        self.launch_testpmd()\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id "\r
+                                    "is 0x2345 / end actions queue index 1 / mark id 1 / end")\r
+        self.pmd_output.execute_cmd("flow create 1 ingress pattern eth / ecpri common type iq_data pc_id is "\r
+                                    "0x2345 / end actions queue index 2 / mark id 2 / end")\r
+        data_lst = self.get_receive_lst(tag_lst, pkt_lst)\r
+        self.verify([data.get('queue') for data in data_lst] == ['1', '2'], "pkt to the wrong queue!")\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == ['0x1', '0x2'], "pkt with wrong FDIR matched ID!")\r
+        new_session.send_expect("ip link set {} vf 0 trust off".format(self.pf_interface), "#")\r
+        data_lst = self.get_receive_lst(tag_lst, pkt_lst)\r
+        self.verify(data_lst[1].get('queue') == '2', "pkt to the wrong queue!")\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == [None, '0x2'], "pkt with wrong FDIR matched ID!")\r
+        new_session.close()\r
+\r
+    def test_ecpri_fdir_when_DCF_exit(self):\r
+        self.dut.send_expect("quit", "#")\r
+        eal_param = " -a {},cap=dcf".format(self.sriov_vfs_port[0].pci)\r
+        self.pmd_output.start_testpmd(cores=list(range(8)), eal_param=eal_param, prefix="test1",\r
+                                      socket=self.ports_socket)\r
+        self.pmd_output.execute_cmd("port config 0 udp_tunnel_port add ecpri 0x5123")\r
+        new_session = self.dut.create_session(name="new_session")\r
+        pmd_output1 = PmdOutput(self.dut, new_session)\r
+        eal_param1 = " -a {} -a {}".format(self.sriov_vfs_port[1].pci, self.sriov_vfs_port[2].pci)\r
+        param = " --rxq=16 --txq=16"\r
+        pmd_output1.start_testpmd(cores=list(range(8)), eal_param=eal_param1, param=param, prefix="test2",\r
+                                  socket=self.ports_socket)\r
+        pmd_output1.execute_cmd("flow create 0 ingress pattern eth / ipv4 / udp / ecpri common type iq_data pc_id "\r
+                                "is 0x2345 / end actions queue index 1 / mark id 1 / end")\r
+        pmd_output1.execute_cmd("flow create 0 ingress pattern eth / ecpri common type iq_data pc_id is 0x2345 / end "\r
+                                "actions queue index 2 / mark id 2 / end")\r
+        pmd_output1.execute_cmd("set verbose 1")\r
+        pmd_output1.execute_cmd("set fwd rxonly")\r
+        pmd_output1.execute_cmd("start")\r
+        pkt_lst = ["Ether(dst='{}')/IP()/UDP(dport=0x5123)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[1]),\r
+                   "Ether(dst='{}', type=0xAEFE)/Raw(\'\\x10\\x00\\x02\\x24\\x23\\%s\')".format(Mac_list[1])]\r
+        tag_lst = ['x45']\r
+        data_lst = self.get_receive_lst(tag_lst, pkt_lst, pmd_output=pmd_output1)\r
+        # verify\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == ['0x1', '0x2'] and [data.get('queue') for data in data_lst] == ['1', '2'], 'mark id or queue wrong!')\r
+\r
+        self.dut.send_expect("quit", "#")\r
+        data_lst = self.get_receive_lst(tag_lst, pkt_lst, pmd_output=pmd_output1)\r
+        # verify\r
+        self.verify([data.get('FDIR matched ID') for data in data_lst] == [None, '0x2'] and data_lst[1].get('queue') == '2', 'mark id or queue wrong!')\r
+\r
+    def get_receive_lst(self, tag_lst=[], pkt_lst=[], pmd_output='', stats=True):\r
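+        # Send every tag/packet-template combination, check RSS distribution when stats is True,\r
+        # and collect the parsed per-packet Rx info (queue, RSS hash, FDIR matched ID).\r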
+        data_lst = []\r
+        for tag in tag_lst:\r
+            for pkt in pkt_lst:\r
+                pkt_str = pkt % tag\r
+                out = self.send_pkt(pkt_str=pkt_str, pmd_output=pmd_output)\r
+                rfc.verify_directed_by_rss(out, rxq=16, stats=stats) \r
+                reta_line = self.get_receive_data(out)\r
+                data_lst.append(reta_line)\r
+        return data_lst\r
+\r
+    def get_receive_data(self, out):\r
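+        # Parse testpmd verbose output into a dict holding the Rx port/queue, RSS hash and\r
+        # FDIR matched ID of the received packet.\r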
+        reta_line = {}\r
+        lines = out.split("\r\n")\r
+        for line in lines:\r
+            line = line.strip()\r
+            if len(line) != 0 and line.strip().startswith("port "):\r
+                reta_line = {}\r
+                rexp = r"port (\d)/queue (\d{1,2}): received (\d) packets"\r
+                m = re.match(rexp, line.strip())\r
+                if m:\r
+                    reta_line["port"] = m.group(1)\r
+                    reta_line["queue"] = m.group(2)\r
+\r
+            elif len(line) != 0 and line.startswith(("src=",)):\r
+                for item in line.split("-"):\r
+                    item = item.strip()\r
+                    if item.startswith("RSS hash"):\r
+                        name, value = item.split("=", 1)\r
+                        reta_line[name.strip()] = value.strip()\r
+                    elif item.startswith("FDIR matched ID"):\r
+                        name, value = item.split("=", 1)\r
+                        reta_line[name.strip()] = value.strip()\r
+        return reta_line\r
+\r
+    def compile_dpdk(self):\r
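+        # Patch iavf_rxtx.c before rebuilding DPDK: print the ptype taken from the flex Rx\r
+        # descriptor, disable the vectorized Rx path and force the scalar flex-RxD receive\r
+        # function, so send_and_verify() can read the ptype from the testpmd output.\r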
+        cmd_lst = [r"sed -i '/iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);/i\printf(\"++++++++++++ptype=%u\\n\",IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0));' ",\r
+                   r"sed -i '/IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);/{:a;n;s/ifdef RTE_ARCH_X86/if 0/g;/struct iavf_rx_queue/!ba}' ",\r
+                   r"sed -i '/rx_pkt_burst = iavf_recv_pkts;/{n;s/\}/\}dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;\n/g}' "]\r
+        for cmd in cmd_lst:\r
+            self.dut.send_expect(cmd + self.file_path, "#")\r
+        self.dut.build_install_dpdk(self.target)\r
+\r
+    def send_and_verify(self, dts_mac, ecpri, if_match=True):\r
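+        # Send every template in pkt_lst with the given dst MAC and eCPRI UDP port, and check\r
+        # the printed ptype against the matched or non-matched expectation list.\r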
+        ptype_lst = ptype_match_lst if if_match else ptype_nomatch_lst\r
+        for i in range(len(pkt_lst)):\r
+            out = self.send_pkt(pkt_lst[i], dts_mac=dts_mac, ecpri=ecpri)\r
+            self.verify(ptype_lst[i] in out, 'ptype is error, expect {}'.format(ptype_lst[i]))\r
+\r
+    def send_pkt(self, pkt_str='', dts_mac='00:11:22:33:44:11', ecpri='0x5123', pmd_output=''):\r
+        self.pkt.append_pkt(pkt_str.format(dts_mac, ecpri))\r
+        self.pkt.send_pkt(crb=self.tester, tx_port=self.tester_iface0, count=1)\r
+        out = pmd_output.get_output() if pmd_output else self.pmd_output.get_output()\r
+        self.pkt.update_pkt([])\r
+        return out\r
+\r
+    def tear_down(self):\r
+        self.dut.kill_all()\r
+\r
+    def tear_down_all(self):\r
+        self.dut.kill_all()\r
-- 
2.25.1

