* [dts] [PATCH V2] add test suite hotplug
@ 2016-11-22 6:39 xu,gang
From: xu,gang @ 2016-11-22 6:39 UTC (permalink / raw)
To: dts; +Cc: xu,gang
Signed-off-by: xu,gang <gangx.xu@intel.com>
---
tests/TestSuite_hotplug.py | 153 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 153 insertions(+)
create mode 100644 tests/TestSuite_hotplug.py
diff --git a/tests/TestSuite_hotplug.py b/tests/TestSuite_hotplug.py
new file mode 100644
index 0000000..0945dfd
--- /dev/null
+++ b/tests/TestSuite_hotplug.py
@@ -0,0 +1,153 @@
+#BSD LICENSE
+#
+# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""
+DPDK Test suite.
+Test port hot plug.
+"""
+
+import string
+import time
+import re
+import utils
+from test_case import TestCase
+from plotting import Plotting
+from settings import HEADER_SIZE
+from etgen import IxiaPacketGenerator
+from packet import Packet, sniff_packets, load_sniff_packets
+
+class TestPortHotPlug(TestCase):
+ """
+    This feature currently supports only igb_uio and is not supported on FreeBSD.
+ """
+ def set_up_all(self):
+ """
+ Run at the start of each test suite.
+ """
+ self.dut_ports = self.dut.get_ports(self.nic)
+ self.verify(len(self.dut_ports) >= 2, "Insufficient ports")
+ cores = self.dut.get_core_list("1S/4C/1T")
+ self.coremask = utils.create_mask(cores)
+        self.port = len(self.dut_ports) - 1

+    def set_up(self):
+ """
+ Run before each test case.
+ """
+ self.dut.send_expect("./tools/dpdk-devbind.py -u %s" % self.dut.ports_info[self.port]['pci'],"#",60)
+
+ def attach(self, port):
+ """
+ attach port
+ """
+ self.dut.send_expect("port attach %s" % self.dut.ports_info[port]['pci'],"is attached",60)
+ self.dut.send_expect("port start %s" % port,"Link Up",60)
+ self.dut.send_expect("show port info %s" % port,"testpmd>",60)
+
+ def detach(self, port):
+ """
+ detach port
+ """
+ self.dut.send_expect("port stop %s" % port,"Link Down",60)
+ self.dut.send_expect("port close %s" % port,"Closing ports...",60)
+ self.dut.send_expect("port detach %s" % port,"is detached",60)
+
+ def test_after_attach(self):
+ """
+        Run testpmd first, then attach the port.
+ """
+ cmd = "./x86_64-native-linuxapp-gcc/app/testpmd -c %s -n %s -- -i" % (self.coremask,self.dut.get_memory_channels())
+ self.dut.send_expect(cmd,"testpmd>",60)
+ session_secondary = self.dut.new_session()
+ session_secondary.send_expect("./tools/dpdk-devbind.py --bind=igb_uio %s" % self.dut.ports_info[self.port]['pci'], "#", 60)
+ self.attach(self.port)
+ self.dut.send_expect("start","testpmd>",60)
+ self.dut.send_expect("port detach %s" % self.port,"Please close port first",60)
+ self.dut.send_expect("stop","testpmd>",60)
+ self.detach(self.port)
+ self.attach(self.port)
+
+ self.dut.send_expect("start","testpmd>",60)
+ self.dut.send_expect("port detach %s" % self.port,"Please close port first",60)
+        self.send_packet(self.port)
+        out = self.dut.send_expect("show port stats %s" % self.port, "testpmd>", 60)
+        packet = re.search(r"RX-packets:\s*(\d*)", out)
+        sum_packet = int(packet.group(1))
+        self.verify(sum_packet == 1, "Insufficient packets received")
+        self.dut.send_expect("quit", "#", 60)
+
+    def send_packet(self, port):
+        """
+        Send a packet to the port
+ """
+        self.dmac = self.dut.get_mac_address(port)
+        txport = self.tester.get_local_port(port)
+        self.txItf = self.tester.get_interface(txport)
+        pkt = Packet(pkt_type='UDP')
+        pkt.config_layer('ether', {'dst': self.dmac})
+ pkt.send_pkt(tx_port=self.txItf)
+
+ def test_before_attach(self):
+ """
+        Start testpmd with the port already bound to igb_uio, then detach and re-attach it.
+ """
+ session_secondary = self.dut.new_session()
+ session_secondary.send_expect("./tools/dpdk-devbind.py --bind=igb_uio %s" % self.dut.ports_info[self.port]['pci'], "#", 60)
+
+ cmd = "./x86_64-native-linuxapp-gcc/app/testpmd -c %s -n %s -- -i" % (self.coremask,self.dut.get_memory_channels())
+ self.dut.send_expect(cmd,"testpmd>",60)
+ self.detach(self.port)
+ self.attach(self.port)
+ self.dut.send_expect("start","testpmd>",60)
+ self.dut.send_expect("port detach %s" % self.port, "Please close port first",60)
+        self.send_packet(self.port)
+        out = self.dut.send_expect("show port stats %s" % self.port, "testpmd>", 60)
+        packet = re.search(r"RX-packets:\s*(\d*)", out)
+        sum_packet = int(packet.group(1))
+        self.verify(sum_packet == 1, "Insufficient packets received")
+        self.dut.send_expect("quit", "#", 60)
+
+
+ def tear_down(self):
+ """
+ Run after each test case.
+ """
+ self.dut.send_expect("./tools/dpdk-devbind.py --bind=igb_uio %s" % self.dut.ports_info[self.port]['pci'],"#",60)
+ self.dut.kill_all()
+ time.sleep(2)
+
+
+ def tear_down_all(self):
+ """
+ Run after each test suite.
+ """
+ pass
+
--
1.9.3
* [dts] [PATCH V2] add Test Suite ip_pipeline_config_file
2016-11-22 6:39 [dts] [PATCH V2] add test suite hotplug xu,gang
@ 2016-11-22 6:39 ` xu,gang
From: xu,gang @ 2016-11-22 6:39 UTC (permalink / raw)
To: dts; +Cc: xu,gang
Signed-off-by: xu,gang <gangx.xu@intel.com>
---
test_plans/ip_pipeline_config_file_test_plan.rst | 2971 ++++++++++++++++++++++
tests/TestSuite_ip_pipeline_config_file.py | 334 +++
2 files changed, 3305 insertions(+)
create mode 100644 test_plans/ip_pipeline_config_file_test_plan.rst
create mode 100644 tests/TestSuite_ip_pipeline_config_file.py
diff --git a/test_plans/ip_pipeline_config_file_test_plan.rst b/test_plans/ip_pipeline_config_file_test_plan.rst
new file mode 100644
index 0000000..038fff3
--- /dev/null
+++ b/test_plans/ip_pipeline_config_file_test_plan.rst
@@ -0,0 +1,2971 @@
+.. BSD LICENSE
+ Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Test Case: test_ip_pipeline_cfg
+===============================
+
+cfg file default-FIREWALL.cfg
+-----------------------------
+;The default values are stored in this file
+[PIPELINE0]
+type = MASTER
+core = 0
+;PIPELINE1 all global variable
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+
+type = FIREWALL
+n_rules = 1
+pkt_type = ipv4
+
+Run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
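+
+The positive cases can also be scripted outside of DTS. Below is a minimal sketch
+using only the Python standard library; the application path, port mask and cfg
+file name are placeholders for this environment, and the check simply assumes that
+a successfully parsed configuration keeps the process alive::
+
+    import subprocess
+    import time
+
+    def ip_pipeline_starts(cfg_path, app="./build/ip_pipeline",
+                           port_mask="0x3", settle=10):
+        """Return True if ip_pipeline is still running `settle` seconds after launch."""
+        proc = subprocess.Popen([app, "-p", port_mask, "-f", cfg_path],
+                                stdout=subprocess.DEVNULL,
+                                stderr=subprocess.DEVNULL)
+        time.sleep(settle)           # give EAL and the pipelines time to initialize
+        alive = proc.poll() is None  # a config parse error aborts the app early
+        proc.terminate()
+        proc.wait()
+        return alive
+
+    # positive case: the application is expected to keep running
+    assert ip_pipeline_starts("default-FIREWALL.cfg")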
+
+cfg file default-FLOW_ACTIONS.cfg
+---------------------------------
+;The default values are stored in this file
+[PIPELINE0]
+type = MASTER
+core = 0
+;PIPELINE1 all global variable
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file default-FLOW_CLASSIFICATION.cfg
+------------------------------------------
+;The default values are stored in this file
+[PIPELINE0]
+type = MASTER
+core = 0
+;PIPELINE1 all global variable
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 192
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file default-PASS-THROUGH.cfg
+---------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+
+type = PASS-THROUGH
+dma_size = 16
+dma_dst_offset = 64
+dma_src_offset = 150
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF
+dma_hash_offset = 80
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file default-ROUTING.cfg
+----------------------------
+;The default values are stored in this file
+[PIPELINE0]
+type = MASTER
+core = 0
+;PIPELINE1 all global variable
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+
+type = ROUTING
+n_routes = 4096
+ip_hdr_offset = 270
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FIREWALL-n_rules-1.cfg
+------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FIREWALL
+n_rules = 1
+pkt_type = ipv4
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FIREWALL-n_rules-4294967295.cfg
+----------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FIREWALL
+n_rules = 4294967295
+pkt_type = ipv4
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FIREWALL-pkt_type-qinq_ipv4.cfg
+----------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FIREWALL
+n_rules = 1
+pkt_type = qinq_ipv4
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FIREWALL-pkt_type-vlan_ipv4.cfg
+---------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FIREWALL
+n_rules = 1
+pkt_type = vlan_ipv4
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_ACTIONS-color_offset-0.cfg
+-----------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 0
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_ACTIONS-color_offset-4294967295.cfg
+------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 4294967295
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_ACTIONS-flow_id_offset-0.cfg
+--------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 0
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_ACTIONS-flow_id_offset-4294967295.cfg
+--------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 4294967295
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_ACTIONS-n_flows-2.cfg
+-----------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 2
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_ACTIONS-n_flows-4096.cfg
+--------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_ACTIONS-n_meters_per_flow-4.cfg
+-----------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 4
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_CLASSIFICATION-hash_offset-0.cfg
+---------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 192
+key_size = 16
+hash_offset = 0
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_CLASSIFICATION-hash_offset-4294967295.cfg
+-------------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 192
+key_size = 16
+hash_offset = 4294967295
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_CLASSIFICATION-key_offset-0.cfg
+----------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 0
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_CLASSIFICATION-key_offset-4294967295.cfg
+--------------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 4294967295
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_CLASSIFICATION-key_size-16.cfg
+----------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 192
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_CLASSIFICATION-key_size-8.cfg
+------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 192
+key_size = 8
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_CLASSIFICATION-n_flows-2147483648.cfg
+----------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 2147483648
+key_offset = 192
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FLOW_CLASSIFICATION-n_flows-2.cfg
+-----------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 2
+key_offset = 192
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-ROUTING-ip_hdr_offset-0.cfg
+------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = ROUTING
+n_routes = 4096
+ip_hdr_offset = 0
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-ROUTING-ip_hdr_offset-4294967295.cfg
+--------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = ROUTING
+n_routes = 4096
+ip_hdr_offset = 4294967295
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-ROUTING-n_routes-1.cfg
+-------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = ROUTING
+n_routes = 1
+ip_hdr_offset = 270
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-ROUTING-n_routes-4294967295.cfg
+-----------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = ROUTING
+n_routes = 4294967295
+ip_hdr_offset = 270
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename, check that the app runs successfully
+
+cfg file test-FIREWALL-n_rules--1.cfg
+-------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FIREWALL
+n_rules = -1
+pkt_type = ipv4
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
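+
+For the negative cases the expectation is inverted: the application should abort
+while parsing the cfg file instead of reaching the run state. A minimal sketch,
+reusing the ip_pipeline_starts helper sketched earlier (the cfg file name is again
+a placeholder)::
+
+    # negative case: the app is expected to abort during config parsing,
+    # so it should no longer be alive after the settle period
+    assert not ip_pipeline_starts("test-FIREWALL-n_rules--1.cfg")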
+
+cfg file test-FIREWALL-n_rules-4294967296.cfg
+---------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FIREWALL
+n_rules = 4294967296
+pkt_type = ipv4
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FIREWALL-pkt_type-abcde.cfg
+------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FIREWALL
+n_rules = 1
+pkt_type = abcde
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-color_offset--1.cfg
+----------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = -1
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-color_offset-4294967296.cfg
+--------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 4294967296
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-flow_id_offset--1.cfg
+------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = -1
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-flow_id_offset-4294967296.cfg
+---------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 4294967296
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-ip_hdr_offset--1.cfg
+-----------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = -1
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-ip_hdr_offset-4294967296.cfg
+----------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 4294967296
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-n_flows-0.cfg
+------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 0
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-n_flows--1.cfg
+-----------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = -1
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-n_flows-4294967296.cfg
+----------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4294967296
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-n_meters_per_flow-0.cfg
+--------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 0
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_ACTIONS-n_meters_per_flow-5.cfg
+--------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_ACTIONS
+n_flows = 4096
+flow_id_offset = 286
+color_offset = 192
+ip_hdr_offset = 270
+n_meters_per_flow = 5
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_CLASSIFICATION-hash_offset--1.cfg
+-------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 192
+key_size = 16
+hash_offset = -1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_CLASSIFICATION-hash_offset-4294967296.cfg
+-----------------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 192
+key_size = 16
+hash_offset = 4294967296
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_CLASSIFICATION-key_offset--1.cfg
+-----------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = -1
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_CLASSIFICATION-key_offset-4294967296.cfg
+---------------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 4294967296
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_CLASSIFICATION-key_size-7.cfg
+---------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4096
+key_offset = 192
+key_size = 7
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_CLASSIFICATION-n_flows-0.cfg
+-------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 0
+key_offset = 192
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_CLASSIFICATION-n_flows--1.cfg
+-----------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = -1
+key_offset = 192
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-FLOW_CLASSIFICATION-n_flows-4294967296.cfg
+------------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = FLOW_CLASSIFICATION
+n_flows = 4294967296
+key_offset = 192
+key_size = 16
+hash_offset = 208
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-ROUTING-ip_hdr_offset--1.cfg
+---------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = ROUTING
+n_routes = 4096
+ip_hdr_offset = -1
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-ROUTING-ip_hdr_offset-4294967296.cfg
+------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = ROUTING
+n_routes = 4096
+ip_hdr_offset = 4294967296
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-ROUTING-n_routes--1.cfg
+-----------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = ROUTING
+n_routes = -1
+ip_hdr_offset = 270
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+cfg file test-ROUTING-n_routes-4294967296.cfg
+--------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+core = 1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0
+msgq_out = MSGQ0
+timer_period = 100
+type = ROUTING
+n_routes = 4294967296
+ip_hdr_offset = 270
+
+run the command ./build/ip_pipeline -p 0x3 -f ~/ip_pipeline/ip_pipeline_cfg/postive/filename and check that the app fails to run
+
+
+
+Test Case: test_ip_pipeline_cpu_utilization
+===========================================
+
+Run ip_pipeline with the command::
+
+    ./examples/ip_pipeline/build/ip_pipeline -p 0xf -f ./examples/ip_pipeline/config/edge_router_downstream.cfg -s ./examples/ip_pipeline/config/edge_router_downstream.sh
+
+Then send packets with the scapy tool and check the headroom on the pipeline CLI::
+
+    pipeline>t 1 headroom
+
+If the reported headroom is 100% the test passes, otherwise it fails.
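+
+A minimal sketch of the traffic and the headroom check, assuming scapy on the
+tester; the interface name is a placeholder and the exact output format of
+"t 1 headroom" should be confirmed on the DPDK version under test::
+
+    import re
+    from scapy.all import Ether, IP, UDP, sendp
+
+    # send a burst of UDP packets into the pipeline under test
+    pkt = Ether() / IP(dst="192.168.1.1") / UDP(sport=1024, dport=1024)
+    sendp(pkt, iface="ens801f0", count=100)
+
+    def headroom_is_full(cli_output):
+        """Pass only when the CLI reports 100% headroom for the checked thread."""
+        match = re.search(r"(\d+(?:\.\d+)?)\s*%", cli_output)
+        return match is not None and float(match.group(1)) == 100.0
+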
+Test Case: test_ip_pipeline_link_identification
+===============================================
+
+Description
+Link identification has two goals. The first is to create a brand new option in the
+CFG file's LINK section, so that a link can be configured to a specific physical port
+using its PCI address; the user can then use either the original "-p" option or the
+new LINK section item to identify the physical port he/she wants to use. The second
+is to maintain the original "-p" option as it is. That is, the following logic shall
+be applied so that the two ways will not mix.
+
+----------------------------------------------------------------------
+  | a. "-p" cmd option exists      | b. "-p" cmd option does not exist
+----------------------------------------------------------------------
+1 | EAL section "pci_whitelist"    | EAL section "pci_whitelist" is
+  | option is allowed and optional | NOT allowed
+----------------------------------------------------------------------
+2 | LINK section "pci_bdf" option  | LINK section "pci_bdf" option
+  | is NOT allowed                 | is MANDATORY
+----------------------------------------------------------------------
+
+1. Regression for all the old CFG files (the regression team should cover it)
+------------------------------------------------------------------------
+
+----------------------------------------------------------------
+2. CFG file containing 1 "pci_whitelist" section with "-p" exists shall work
+([root@dpdk_testserver03 ip_pipeline]# ./build/ip_pipeline -p 1 -f ./config/pt1_2ports_link_iden.cfg
+Parse error in section "EAL", entry "pci_whitelist": too many elements
+
+Aborted (core dumped))
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+
+[EAL]
+pci_whitelist = 0000:85:00.0
+Run the command ./build/ip_pipeline -p 1 -f ./config/pt1_2ports_link_iden.cfg, check that the app runs successfully
+
+----------------------------------------------------------------
+3. CFG file containing 2 "pci_whitelist" sections with "-p" exists
+([root@dpdk_testserver03 ip_pipeline]# ./build/ip_pipeline -p 3 -f ./config/pt1_2ports_link_iden.cfg
+Parse error in section "EAL", entry "pci_whitelist": too many elements
+
+Aborted (core dumped))
+----------------------------------------------------------------
+Config file as follows
+
+
+Run the command ./build/ip_pipeline -p 3 -f ./config/pt1_2ports_link_iden.cfg, check that the app runs successfully
+
+----------------------------------------------------------------
+4. CFG file containing 4 "pci_whitelist" sections with "-p" exists
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+
+[EAL]
+pci_whitelist = 0000:83:00.0
+pci_whitelist = 0000:83:00.1
+pci_whitelist = 0000:85:00.0
+pci_whitelist = 0000:85:00.1
+Run the command ./build/ip_pipeline -p f -f ./config/pt1_2ports_link_iden.cfg, check that the app runs successfully
+
+----------------------------------------------------------------
+5. CFG file containing "pci_bdf" in LINK section shall NOT work with "-p" exists
+([root@dpdk_testserver03 ip_pipeline]# ./build/ip_pipeline -p 3 -f ./config/pt1_2ports_link_iden.cfg
+Parse error in section "LINK0", entry "pci_bdf": entry not allowed (port_mask is provided)
+
+Aborted (core dumped))
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+
+[LINK0]
+pci_bdf = 0000:85:00.0
+
+[LINK1]
+pci_bdf = 0000:85:00.1
+Run the command ./build/ip_pipeline -p 3 -f ./config/pt1_2ports_link_iden.cfg and check whether the app runs successfully; if it does, the test fails, otherwise it passes
+
+----------------------------------------------------------------
+6. CFG file containing "pci_bdf" in LINK section and EAL section shall NOT work with "-p" exists(EAL and LINK not matches)
+([root@dpdk_testserver03 ip_pipeline]# ./build/ip_pipeline -p 3 -f ./config/pt1_2ports_link_iden.cfg
+Parse error in section "LINK0", entry "pci_bdf": entry not allowed (port_mask is provided)
+
+Aborted (core dumped))
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+
+[EAL]
+pci_whitelist = 0000:83:00.0
+pci_whitelist = 0000:83:00.1
+
+[LINK0]
+pci_bdf = 0000:85:00.0
+
+[LINK1]
+pci_bdf = 0000:85:00.1
+Run the command ./build/ip_pipeline -p 3 -f ./config/pt1_2ports_link_iden.cfg and check whether the app runs successfully; if it does, the test fails, otherwise it passes
+
+----------------------------------------------------------------
+7. CFG file containing "pci_bdf" in LINK section and EAL section shall NOT work with "-p" exists(EAL and LINK matches)
+([root@dpdk_testserver03 ip_pipeline]# ./build/ip_pipeline -p 3 -f ./config/pt1_2ports_link_iden.cfg
+Parse error in section "LINK0", entry "pci_bdf": entry not allowed (port_mask is provided)
+
+Aborted (core dumped))
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+
+[EAL]
+pci_whitelist = 0000:85:00.0
+pci_whitelist = 0000:85:00.1
+
+[LINK0]
+pci_bdf = 0000:85:00.0
+
+[LINK1]
+pci_bdf = 0000:85:00.1
+Run the command ./build/ip_pipeline -p 3 -f ./config/pt1_2ports_link_iden.cfg and check whether the app runs successfully; if it does, the test fails, otherwise it passes
+
+----------------------------------------------------------------
+8. CFG file containing "pci_whitelist" in EAL section shall NOT work with "-p" absence
+([root@dpdk_testserver03 ip_pipeline]# ./build/ip_pipeline -f ./config/pt1_2ports_link_iden.cfg
+Parse error in section "EAL", entry "pci_whitelist": entry to be generated by the application (port_mask not provided)
+
+Aborted (core dumped))
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+
+[EAL]
+pci_whitelist = 0000:85:00.0
+pci_whitelist = 0000:85:00.1
+Run the command ./build/ip_pipeline -f ./config/pt1_2ports_link_iden.cfg and check whether the app runs successfully; if it does, the test fails, otherwise it passes
+
+
+----------------------------------------------------------------
+9. CFG file containing "pci_bdf" in 1 LINK section shall work with "-p" absence
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0
+pktq_out = TXQ0.0
+
+[LINK0]
+pci_bdf = 0000:85:00.0
+
+Run the command ./build/ip_pipeline -f ./config/pt1_2ports_link_iden.cfg and check whether the app runs successfully; if it does, the test passes, otherwise it fails
+
+----------------------------------------------------------------
+10. CFG file containing "pci_bdf" in 2 LINK sections shall work with "-p" absence
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+
+[LINK0]
+pci_bdf = 0000:85:00.0
+
+[LINK1]
+pci_bdf = 0000:85:00.1
+Run the command ./build/ip_pipeline -f ./config/pt1_2ports_link_iden.cfg and check whether the app runs successfully; if it does, the test passes, otherwise it fails
+
+----------------------------------------------------------------
+11. CFG file containing "pci_bdf" in 4 LINK sections shall work with "-p" absence
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+
+[LINK0]
+pci_bdf = 0000:83:00.0
+
+[LINK1]
+pci_bdf = 0000:83:00.1
+
+[LINK2]
+pci_bdf = 0000:85:00.0
+
+[LINK3]
+pci_bdf = 0000:85:00.1
+Run the command ./build/ip_pipeline -f ./config/pt1_2ports_link_iden.cfg and check whether the app runs successfully; if it does, the test passes, otherwise it fails
+
+----------------------------------------------------------------
+12. CFG file containing "pci_bdf" in 1 LINK section and 2 LINKs exist shall work with "-p" absence
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+
+[LINK0]
+pci_bdf = 0000:85:00.0
+
+[LINK1]
+pci_bdf = 0000:85:00.1
+Run the command ./build/ip_pipeline -f ./config/pt1_2ports_link_iden.cfg and check whether the app runs successfully; if it does, the test passes, otherwise it fails
+
+----------------------------------------------------------------
+13. CFG file containing "pci_bdf" in 1 LINK section and 2 LINKs exist shall NOT work with "-p" exists
+[root@dpdk_testserver03 ip_pipeline]# ./build/ip_pipeline -p 3 -f ~/config/link_identification/link_iden_case_13.cfg
+Parse error in section "LINK0", entry "pci_bdf": entry not allowed (port_mask is provided)
+
+Aborted (core dumped)
+----------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+
+[LINK0]
+pci_bdf = 0000:85:00.0
+
+[LINK1]
+pci_bdf = 0000:85:00.1
+Run the command ./build/ip_pipeline -p 3 -f ./config/pt1_2ports_link_iden.cfg and check whether the app runs successfully; if it does, the test fails, otherwise it passes
+
+Test Case: test_ip_pipeline_parser_cleanup
+===========================================
+
+1. Test config parser for pipeline passthrough
+
+2. Test config parser for pipeline flow classification(need to test based on bugs raised)
+
+3. Test config parser for pipeline flow action
+
+4. Test config parser for pipeline firewall
+
+5. Test config parser for pipeline routing
+
+6. Add multiple whitespaces between arguments in the pktq_in/out and msgq_in/out entries
+
+7. Create cfg files with all kinds of errors (e.g. a section that is not supposed to be there, or an out-of-range value)
+
+8. In the pipeline-specific cfg section, write n_rules=4k/64k/512k/16M and check that the table memory has the size corresponding to the expanded values n_rules=4096/65536/524288/16777216; see the sketch below for the expected expansion
+(the open problem is how to check the table memory; this needs to be confirmed with the developers)
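+
+The expansion the checks below rely on is simple arithmetic; the sketch below is
+only an illustration of the expected values, not the application's actual parser::
+
+    def expand_suffix(value):
+        """Expand the K/M suffixes used in the cfg files, e.g. '4k' -> 4096."""
+        multipliers = {"k": 1 << 10, "K": 1 << 10, "m": 1 << 20, "M": 1 << 20}
+        if value[-1] in multipliers:
+            return int(value[:-1]) * multipliers[value[-1]]
+        return int(value)
+
+    assert expand_suffix("4k") == 4096
+    assert expand_suffix("64k") == 65536
+    assert expand_suffix("512k") == 524288
+    assert expand_suffix("16M") == 16777216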
+
+--------------------------------------------------
+1. An invalid entry should be recognized(negative)
+--------------------------------------------------
+Config file as follows, pipeline is passthrough
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+dma_src_offset = 278; mbuf (128) + headroom (128) + 1st ethertype offset (14) + ttl offset within ip header = 278 (ipv4)
+dma_dst_offset = 128; mbuf (128)
+dma_size = 16
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF
+dma_hash_offset = 144; (dma_dst_offset+dma_size)
+lb = hash
+test = 123
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/pt1_mesgq_2ports.cfg
+Check that the config parser recognizes the invalid entry ("lb" / "test" is not a recognized entry) and reports the error shown below; if it does, the test passes, otherwise it fails.
+
+pipeline> [APP] Initializing PIPELINE1 ...
+[PIPELINE1] Pass-through
+Parse error in section "PIPELINE1": invalid entry "test"
+PANIC in app_init_pipelines():
+Pipeline instance "PIPELINE1" back-end init error
+7: [./build/ip_pipeline() [0x432615]]
+6: [/lib64/libc.so.6(__libc_start_main+0xf5) [0x33dca21d65]]
+5: [./build/ip_pipeline(main+0x5f) [0x43187f]]
+4: [./build/ip_pipeline(app_init+0x15b8) [0x4419e8]]
+3: [./build/ip_pipeline() [0x43f85b]]
+2: [./build/ip_pipeline(__rte_panic+0xc9) [0x42c33c]]
+1: [./build/ip_pipeline(rte_dump_stack+0x1a) [0x4d5dca]]
+Aborted (core dumped)
+
+--------------------------------------------------------------
+2. Multiple whitespaces between arguments in pktq_in entry
+--------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+dma_src_offset = 278; mbuf (128) + headroom (128) + 1st ethertype offset (14) + ttl offset within ip header = 278 (ipv4)
+dma_dst_offset = 128; mbuf (128)
+dma_size = 16
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF
+dma_hash_offset = 144; (dma_dst_offset+dma_size)
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/pt1_mesgq_2ports.cfg
+Check that the config parser recognizes the multiple whitespaces between the arguments of pktq_in. If it does, the test passes, otherwise it fails.
+
+--------------------------------------------------------------
+3. Multiple whitespaces between arguments in pktq_out entry
+--------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+dma_src_offset = 278; mbuf (128) + headroom (128) + 1st ethertype offset (14) + ttl offset within ip header = 278 (ipv4)
+dma_dst_offset = 128; mbuf (128)
+dma_size = 16
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF
+dma_hash_offset = 144; (dma_dst_offset+dma_size)
+;lb = hash
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/pt1_mesgq_2ports.cfg
+Check that the config parser recognizes the multiple whitespaces between the arguments of pktq_out. If it does, the test passes, otherwise it fails.
+
+--------------------------------------------------------------
+4. Multiple whitespaces between arguments in msgq_in entry
+--------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = s0c0
+;msgq_in = MSGQ-REQ-PIPELINE0 ; for customer in .cfg.out, can not assign in .cfg file, because these are used as the communication for master and other pipeline.
+;msgq_out = MSGQ-RSP-PIPELINE0
+timer_period = 1
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s0c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0 MSGQ1 MSGQ2 MSGQ3 ;MSGQ-REQ-PIPELINE1, dev will confirm on how to use it
+msgq_out = MSGQ0 MSGQ1 MSGQ2 MSGQ3 ;MSGQ-RSP-PIPELINE1
+timer_period = 100
+
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/pt1_mesgq_2ports.cfg
+Check that the config parser recognizes the multiple whitespaces between the arguments of msgq_in. If it does, the test passes, otherwise it fails.
+
+--------------------------------------------------------------
+5. Multiple whitespaces between arguments in msgq_out entry
+--------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = s0c0
+;msgq_in = MSGQ-REQ-PIPELINE0
+;msgq_out = MSGQ-RSP-PIPELINE0
+timer_period = 1
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s0c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.0 TXQ1.0
+msgq_in = MSGQ0 MSGQ1 MSGQ2 MSGQ3 ;MSGQ-REQ-PIPELINE1
+msgq_out = MSGQ0 MSGQ1 MSGQ2 MSGQ3 ;MSGQ-RSP-PIPELINE1
+timer_period = 100
+
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/pt1_mesgq_2ports.cfg
+Check that the config parser recognizes the multiple whitespaces between the arguments of msgq_out. If it does, the test passes, otherwise it fails.
+
+-----------------------------------
+6. Not recognized section(negative)
+-----------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+dma_src_offset = 278; mbuf (128) + headroom (128) + 1st ethertype offset (14) + ttl offset within ip header = 278 (ipv4)
+dma_dst_offset = 128; mbuf (128)
+dma_size = 16
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF
+dma_hash_offset = 144; (dma_dst_offset+dma_size)
+;lb = hash
+
+[TESTSECTION]
+type = flow classification
+
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/pt1_2ports.cfg
+Check that the config parser reports the invalid section and prints out "CFG:....." messages. If it does, the test passes, otherwise it fails.
+
+---------------------------------
+7. Not recognized entry(negative)
+---------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ1.0 TXQ0.0
+dma_src_offset = 278; mbuf (128) + headroom (128) + 1st ethertype offset (14) + ttl offset within ip header = 278 (ipv4)
+dma_dst_offset = 128; mbuf (128)
+dma_size = 16
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF
+dma_hash_offset = 144; (dma_dst_offset+dma_size)
+;lb = hash
+test = value
+
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/pt1_2ports.cfg
+Check that the config parser reports the invalid entry and prints out "CFG:....." messages. If it does, the test passes, otherwise it fails.
+
+--------------------------------------------------------------
+8. Check that n_rules can be set to 4k and that the table memory has the size corresponding to n_rules=4096 (how to check the table memory needs to be confirmed with the developers).
+The firewall table size cannot be seen, so use flow classification instead: set n_flows and check the table size:
+[PIPELINE2] Flow classification
+TABLE: rte_table_hash_create_key16_ext: Hash table memory footprint is 397440 bytes
+--------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ0 SWQ1
+dma_size = 16
+dma_dst_offset = 192; mbuf (128) + 64
+dma_src_offset = 278; mbuf (128) + headroom (128) + ethernet header (14) + ttl offset within ip header (8) = 278
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF; ipv4 5-tuple
+dma_hash_offset = 208; dma_dst_offset + dma_size
+
+[PIPELINE2]
+type = FLOW_CLASSIFICATION
+core = s1c2
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ1.0 TXQ0.0
+n_flows = 4k
+key_offset = 192; dma_dst_offset
+key_size = 16; dma_size
+hash_offset = 208; dma_hash_offset = 80
+
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/fc_2ports.cfg
+Check that the app runs successfully and that the table memory has the size corresponding to n_flows=4096. If yes, the test passes, otherwise it fails.
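+
+A minimal sketch of how the footprint line quoted above could be extracted from
+the application log; the sample log line and the expected byte count are the ones
+given in this plan::
+
+    import re
+
+    def table_footprint_bytes(log):
+        """Extract N from 'Hash table memory footprint is N bytes'."""
+        match = re.search(r"Hash table memory footprint is (\d+) bytes", log)
+        return int(match.group(1)) if match else None
+
+    log = ("TABLE: rte_table_hash_create_key16_ext: "
+           "Hash table memory footprint is 397440 bytes")
+    # for n_flows = 4k the plan quotes a 397440-byte footprint
+    assert table_footprint_bytes(log) == 397440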
+
+--------------------------------------------------------------
+9. Check that n_flows can be set to 64k and that the table memory has the size corresponding to n_flows=65536 (how to check the table memory needs to be confirmed with the developers)
+[PIPELINE2] Flow classification
+TABLE: rte_table_hash_create_key16_ext: Hash table memory footprint is 6357120 bytes
+--------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ0 SWQ1
+dma_size = 16
+dma_dst_offset = 192; mbuf (128) + 64
+dma_src_offset = 278; mbuf (128) + headroom (128) + ethernet header (14) + ttl offset within ip header (8) = 278
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF; ipv4 5-tuple
+dma_hash_offset = 208; dma_dst_offset + dma_size
+
+[PIPELINE2]
+type = FLOW_CLASSIFICATION
+core = s1c2
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ1.0 TXQ0.0
+n_flows = 64k
+key_offset = 192; dma_dst_offset
+key_size = 16; dma_size
+hash_offset = 208; dma_hash_offset = 80
+
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/fc_2ports.cfg
+Check that the app runs successfully and that the table memory has the size corresponding to n_flows=65536. If yes, the test passes, otherwise it fails.
+
+--------------------------------------------------------------
+10. Check that n_flows can be set to 512k and that the table memory has the size corresponding to n_flows=524288 (how to check the table memory needs to be confirmed with the developers)
+[PIPELINE2] Flow classification
+TABLE: rte_table_hash_create_key16_ext: Hash table memory footprint is 50856064 bytes
+--------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ0 SWQ1
+dma_size = 16
+dma_dst_offset = 192; mbuf (128) + 64
+dma_src_offset = 278; mbuf (128) + headroom (128) + ethernet header (14) + ttl offset within ip header (8) = 278
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF; ipv4 5-tuple
+dma_hash_offset = 208; dma_dst_offset + dma_size
+
+[PIPELINE2]
+type = FLOW_CLASSIFICATION
+core = s1c2
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ1.0 TXQ0.0
+n_flows = 512k
+key_offset = 192; dma_dst_offset
+key_size = 16; dma_size
+hash_offset = 208; dma_hash_offset = dma_dst_offset + dma_size
+
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/fc_2ports.cfg
+Check that the app runs successfully and that the table memory matches n_flows=524288. If yes, test passed; otherwise, test failed.
+
+--------------------------------------------------------------
+11. Check that n_flows can equal 16M and that the table memory matches n_flows=16777216 (need to check with dev how to verify the table memory); the maximum is 2^32, and 2^32+1 can be used to test out of range
+[PIPELINE2] Flow classification
+
+TABLE: rte_table_hash_create_key16_ext: Hash table memory footprint is 1627390080 bytes
+--------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ0 SWQ1
+dma_size = 16
+dma_dst_offset = 192; mbuf (128) + 64
+dma_src_offset = 278; mbuf (128) + headroom (128) + ethernet header (14) + ttl offset within ip header (8) = 278
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF; ipv4 5-tuple
+dma_hash_offset = 208; dma_dst_offset + dma_size
+
+[PIPELINE2]
+type = FLOW_CLASSIFICATION
+core = s1c2
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ1.0 TXQ0.0
+n_flows = 16M
+key_offset = 192; dma_dst_offset
+key_size = 16; dma_size
+hash_offset = 208; dma_hash_offset = dma_dst_offset + dma_size
+
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/fc_2ports.cfg
+Check that the app runs successfully and that the table memory matches n_flows=16777216. If yes, test passed; otherwise, test failed.
+
+--------------------------------------------------------------
+12. Check that n_flows = 16M+1 is rejected as an invalid value (negative case); the maximum is 2^32, and 2^32+1 can be used to test out of range
+[PIPELINE2] Flow classification
+Parse error in section "PIPELINE2": entry "n_flows" has invalid value ("16M+1")
+PANIC in app_init_pipelines():
+Pipeline instance "PIPELINE2" back-end init error
+7: [./build/ip_pipeline() [0x431925]]
+6: [/lib64/libc.so.6(__libc_start_main+0xf5) [0x33dca21d65]]
+5: [./build/ip_pipeline(main+0x5f) [0x4304ff]]
+4: [./build/ip_pipeline(app_init+0x13fb) [0x43f3bb]]
+3: [./build/ip_pipeline() [0x43d449]]
+2: [./build/ip_pipeline(__rte_panic+0xc9) [0x42af1a]]
+1: [./build/ip_pipeline(rte_dump_stack+0x1a) [0x4c8c6a]]
+Aborted (core dumped)
+--------------------------------------------------------------
+Config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = s1c1
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ0 SWQ1
+dma_size = 16
+dma_dst_offset = 192; mbuf (128) + 64
+dma_src_offset = 278; mbuf (128) + headroom (128) + ethernet header (14) + ttl offset within ip header (8) = 278
+dma_src_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF; ipv4 5-tuple
+dma_hash_offset = 208; dma_dst_offset + dma_size
+
+[PIPELINE2]
+type = FLOW_CLASSIFICATION
+core = s1c2
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ1.0 TXQ0.0
+n_flows = 16M+1
+key_offset = 192; dma_dst_offset
+key_size = 16; dma_size
+hash_offset = 208; dma_hash_offset = dma_dst_offset + dma_size
+
+Command to run: ./build/ip_pipeline -p 3 -f ./config/parsercleanup/fc_2ports.cfg
+Check that the app reports the errors shown above; if yes, test passed, otherwise, test failed.
+
+Test Case: test_ip_pipeline_source_sink
+=======================================
+
+1. Enable the CONFIG_RTE_PORT_PCAP compiler option in config/common_base
+2. The config file should use SOURCE/SINK ports instead of RXQ/TXQ; a sample config file is as follows
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+3. Since there are only source/sink ports in the config file, there is no physical port, so the -p option must not appear on the command line; the command should be as follows
+./build/ip_pipeline -f ./config/config_file_name.cfg
+4. Change the permissions of the folders and of the pcap file so the app can access them; for example, if the pcap file is installed as /home/anna/file.pcap, then use the Linux commands
+chmod 777 /home
+chmod 777 /home/anna
+chmod 777 /home/anna/file.pcap
+5. pcap_bytes_rd_per_pkt can be 0 or any other value: 0 means read the whole frame, which takes more memory; if only the IP header is needed, it can be 64,
+which saves a lot of memory
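+
+If the reference pcap files do not exist yet, they could be generated with scapy, for example (a sketch, assuming scapy is available; the packet contents and the 80-frame count are only illustrative):
+
+from scapy.all import Ether, IP, UDP, Raw, wrpcap
+
+# Build 80 simple UDP frames and write them to the pcap consumed by SOURCE0; a second
+# file with different payloads can be written the same way for SOURCE1.
+frames = [Ether() / IP(src="10.0.0.%d" % (i % 250 + 1), dst="192.168.0.1") /
+          UDP(sport=1024 + i, dport=2048) / Raw("X" * 46) for i in range(80)]
+wrpcap("/root/eth1.pcap", frames)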
+
+
+----------------------------------------------------------
+1. Functional test for source port on passthrough pipeline
+----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+--------------------------------------------------------
+2. Functional test for sink port on passthrough pipeline
+--------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+---------------------------------------------------------------
+3. Functional test for source+sink port on passthrough pipeline
+---------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /home/eth1.1.pcap /home/eth2.1.pcap
+pcap_n_pkt_wr = 80 200
+command to run
+./build/ip_pipeline -f ./config/source_sink_port.cfg
+After the app finishes running, eth1.pcap and eth1.1.pcap should have the same size, and eth2.pcap and eth2.1.pcap should have the same size;
+in eth2.1.pcap, the first frame and the 101st frame should be identical, each being the first 64 bytes of the 1st frame in eth2.pcap.
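+
+The size and frame checks above could be scripted with scapy, for example (a sketch using the file paths from the config above; the same approach applies to the later source+sink cases):
+
+import os
+from scapy.all import rdpcap
+
+# The written files should mirror the read files in size.
+assert os.path.getsize("/root/eth1.pcap") == os.path.getsize("/home/eth1.1.pcap")
+assert os.path.getsize("/root/eth2.pcap") == os.path.getsize("/home/eth2.1.pcap")
+
+# In eth2.1.pcap the 1st and 101st frames should be identical and equal to the first
+# 64 bytes of the first frame of eth2.pcap (pcap_bytes_rd_per_pkt = 64 for SOURCE1).
+out_pkts = rdpcap("/home/eth2.1.pcap")
+ref = bytes(rdpcap("/root/eth2.pcap")[0])[:64]
+assert bytes(out_pkts[0]) == bytes(out_pkts[100]) == ref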
+
+-------------------------------------------------
+4. Same pcap read file, different pcap write file
+-------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth1.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 81
+command to run
+./build/ip_pipeline -f ./config/source_sink_port_1.cfg
+
+After the app finishes running, check that eth1.1.pcap and eth2.1.pcap have the same first 80 frames, and check that eth2.1.pcap has 81 frames;
+the 1st frame and the 81st frame should be identical, each being the first 64 bytes of the 1st frame of the eth1.pcap file.
+
+--------------------------------------------------
+5. Same pcap write file, different pcap read file
+--------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth1.1.pcap
+pcap_n_pkt_wr = 80 200
+comand to run
+./build/ip_pipeline -f ./config/source_sink_port_2.cfg
+After the app finishes running, check that eth1.1.pcap has 200 (or 280?) frames, and check that the app finished running correctly
+
+-----------------------------------------------------
+6. pcap_n_pkt_wr equals 0, continuous write sink file
+-----------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 0 200
+command to run
+./build/ip_pipeline -f ./config/source_sink_port_3.cfg
+After the app finishes running, check that eth1.1.pcap has more than 80 frames, and check that eth2.1.pcap has 200 frames
+
+--------------------------------
+7. 4 source port and 4 sink port
+--------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1 SOURCE2 SOURCE3
+pktq_out = SINK0 SINK1 SINK2 SINK3
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap /root/eth1.pcap /root/eth1.1.pcap
+pcap_bytes_rd_per_pkt = 0 64 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap /root/eth3.1.pcap /root/eth4.1.pcap
+pcap_n_pkt_wr = 80 200 80 200
+command to run
+./build/ip_pipeline -f ./config/source_sink_port_4.cfg
+After the app finishes running, check that eth1.1.pcap and eth5.pcap have the same size and frames, and that eth2.1.pcap and eth6.pcap
+have the same size and frames
+
+----------------------------------------------------------------
+8. Add source and sink into the EDGE_ROUTER_DOWNSTREAM.CFG, need to check with zhang, fan about which pipeline to add the SOURCE/SINK
+----------------------------------------------------------------
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = SOURCE0 SOURCE1 SOURCE2 SOURCE3
+pktq_out = SWQ0 SWQ1 SWQ2 SWQ3
+encap = ethernet_qinq
+qinq_sched = test
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap /root/eth1.pcap /root/eth1.1.pcap
+pcap_bytes_rd_per_pkt = 0 64 0 64
+
+[PIPELINE2]
+type = PASS-THROUGH
+core = 2
+pktq_in = SWQ0 SWQ1 SWQ2 SWQ3
+pktq_out = SWQ4 SWQ5 SWQ6 SWQ7
+
+[PIPELINE3]
+type = PASS-THROUGH
+core = 3
+pktq_in = SWQ4 SWQ5 SWQ6 SWQ7
+pktq_out = SINK0 SINK1 SINK2 SINK3
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap /root/eth3.1.pcap /root/eth4.1.pcap
+pcap_n_pkt_wr = 80 200 80 200
+
+[MEMPOOL0]
+pool_size = 512
+
+Note: when only 1 or 2 ports are used, set pcap_n_pkt_wr = 80
+
+---------------------------------------
+9. Source port and sink port not paired, negative
+---------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1 SOURCE2
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app reports errors and quits
+normally; if so, the test passes.
+
+-----------------------------------------
+10. Source port and pcap file read not paired, negative
+-----------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap
+pcap_bytes_rd_per_pkt = 64 0
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 0 200
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap file and then drops them. Check that the app reports errors and quits
+normally; if so, the test passes.
+
+-----------------------------------------------------
+11. Source port and pcap bytes read per packet not paired, negative
+-----------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app reports errors and quits
+normally; if so, the test passes.
+
+--------------------------------------------
+12. Sink port and pcap file write not paired, negative
+--------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_wr = /root/eth1.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+------------------------------------------------
+13. Sink port and pcap n packet write not paired, negative
+------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+-------------------------------------------------------------------
+14. Functional test for source port on flow classification pipeline (Fan will double check it)
+-------------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = FLOW_CLASSIFICATION
+core = s1c2
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_flows = 512
+key_offset = 192; dma_dst_offset
+key_size = 16; dma_size
+hash_offset = 208; dma_hash_offset = dma_dst_offset + dma_size
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+-----------------------------------------------------------------
+15. Functional test for sink port on flow classification pipeline
+-----------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = FLOW_CLASSIFICATION
+core = s1c2
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_flows = 512
+key_offset = 192; dma_dst_offset
+key_size = 16; dma_size
+hash_offset = 208; dma_hash_offset = dma_dst_offset + dma_size
+pcap_file_wr = /root/eth1.pcap /root/eth2.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+------------------------------------------------------------------------
+16. Functional test for source+sink port on flow classification pipeline(need to run with rules)
+------------------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = FLOW_CLASSIFICATION
+core = s1c2
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_flows = 512
+key_offset = 192; dma_dst_offset
+key_size = 16; dma_size
+hash_offset = 208; dma_hash_offset = dma_dst_offset + dma_size
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 200
+command to run
+./build/ip_pipeline -f ./config/source_sink_port.cfg
+After the app finishes running, eth1.pcap and eth1.1.pcap should have the same size, and eth2.pcap and eth2.1.pcap should have the same size;
+in eth2.1.pcap, the first frame and the 101st frame should be identical, each being the first 64 bytes of the 1st frame in eth2.pcap.
+
+-----------------------------------------------------------
+17. Functional test for source port on flow action pipeline
+-----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = FLOW_ACTIONS
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_flows = 16
+n_meters_per_flow = 1
+flow_id_offset = 286; mbuf (128) + headroom (128) + ethernet (14) + ip dst offset (16)
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet (14) = 270
+color_offset = 192; mbuf (128) + 64
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+---------------------------------------------------------
+18. Functional test for sink port on flow action pipeline
+---------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = FLOW_ACTIONS
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_flows = 16
+n_meters_per_flow = 1
+flow_id_offset = 286; mbuf (128) + headroom (128) + ethernet (14) + ip dst offset (16)
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet (14) = 270
+color_offset = 192; mbuf (128) + 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+----------------------------------------------------------------
+19. Functional test for source+sink port on flow action pipeline
+----------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = FLOW_ACTIONS
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_flows = 16
+n_meters_per_flow = 1
+flow_id_offset = 286; mbuf (128) + headroom (128) + ethernet (14) + ip dst offset (16)
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet (14) = 270
+color_offset = 192; mbuf (128) + 64
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 200
+command to run
+./build/ip_pipeline -f ./config/source_sink_port.cfg
+After the app finishes running, eth1.pcap and eth1.1.pcap should have the same size, and eth2.pcap and eth2.1.pcap should have the same size;
+in eth2.1.pcap, the first frame and the 101st frame should be identical, each being the first 64 bytes of the 1st frame in eth2.pcap.
+
+-----------------------------------------------------------
+20. Functional test for source port on rt arp on pipeline
+-----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_arp_entries = 1024
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+arp_key_offset = 192; mbuf (128) + 64
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+---------------------------------------------------------
+21. Functional test for sink port on rt arp on pipeline
+---------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_arp_entries = 1024
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+arp_key_offset = 192; mbuf (128) + 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+----------------------------------------------------------------
+22. Functional test for source+sink port on rt arp on pipeline
+----------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_arp_entries = 1024
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+arp_key_offset = 192; mbuf (128) + 64
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 200
+command to run
+./build/ip_pipeline -f ./config/source_sink_port.cfg
+After the app finishes running, eth1.pcap and eth1.1.pcap should have the same size, and eth2.pcap and eth2.1.pcap should have the same size;
+in eth2.1.pcap, the first frame and the 101st frame should be identical, each being the first 64 bytes of the 1st frame in eth2.pcap.
+
+-----------------------------------------------------------
+23. Functional test for source port on rt arp off pipeline
+-----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_routes = 4096
+;ip_hdr_offset = 142; headroom (128) + ethernet header (14) = 142
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+---------------------------------------------------------
+24. Functional test for sink port on rt arp off pipeline
+---------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_routes = 4096
+;ip_hdr_offset = 142; headroom (128) + ethernet header (14) = 142
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+----------------------------------------------------------------
+25. Functional test for source+sink port on rt arp off pipeline
+----------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_routes = 4096
+;ip_hdr_offset = 142; headroom (128) + ethernet header (14) = 142
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 200
+command to run
+./build/ip_pipeline -f ./config/source_sink_port.cfg
+After the app finishes running, eth1.pcap and eth1.1.pcap should have the same size, and eth2.pcap and eth2.1.pcap should have the same size;
+in eth2.1.pcap, the first frame and the 101st frame should be identical, each being the first 64 bytes of the 1st frame in eth2.pcap.
+
+-----------------------------------------------------------
+26. Functional test for source port on rt mpls arp off pipeline
+-----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_routes = 4096
+encap = ethernet_mpls
+;ip_hdr_offset = 142; headroom (128) + ethernet header (14) = 142
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+pcap_file_rd = /root/eth1.pcap /root/eth1.pcap
+pcap_bytes_rd_per_pkt = 0 0
+;pcap_file_wr = /home/fanzhan2/eth3.pcap /home/fanzhan2/eth2.pcap
+;pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+---------------------------------------------------------
+27. Functional test for sink port on rt mpls arp off pipeline
+---------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_routes = 4096
+encap = ethernet_mpls
+;ip_hdr_offset = 142; headroom (128) + ethernet header (14) = 142
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+;pcap_file_rd = /root/eth1.pcap /root/eth1.pcap
+;pcap_bytes_rd_per_pkt = 0 0
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+----------------------------------------------------------------
+28. Functional test for source+sink port on rt mpls arp off pipeline
+----------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_routes = 4096
+encap = ethernet_mpls
+;ip_hdr_offset = 142; headroom (128) + ethernet header (14) = 142
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+pcap_file_rd = /root/eth1.pcap /root/eth1.pcap
+pcap_bytes_rd_per_pkt = 0 0
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+command to run
+./build/ip_pipeline -f ./config/source_sink_port.cfg
+After the app finishes running, eth1.pcap and eth1.1.pcap should have the same size, and eth2.pcap and eth2.1.pcap should have the same size;
+in eth2.1.pcap, the first frame and the 101st frame should be identical, each being the first 64 bytes of the 1st frame in eth2.pcap.
+
+-----------------------------------------------------------
+29. Functional test for source port on rt mpls arp on pipeline
+-----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_arp_entries = 1024
+encap = ethernet_mpls
+mpls_color_mark = no
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+arp_key_offset = 192; mbuf (128) + 64
+;color_offset = 256; mbuf (128) + headroom (128) = 256
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+---------------------------------------------------------
+30. Functional test for sink port on rt mpls arp on pipeline
+---------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_arp_entries = 1024
+encap = ethernet_mpls
+mpls_color_mark = no
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+arp_key_offset = 192; mbuf (128) + 64
+;color_offset = 256; mbuf (128) + headroom (128) = 256
+;pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+;pcap_bytes_rd_per_pkt = 64 0
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+----------------------------------------------------------------
+31. Functional test for source+sink port on rt mpls arp on pipeline
+----------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_arp_entries = 1024
+encap = ethernet_mpls
+mpls_color_mark = no
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+arp_key_offset = 192; mbuf (128) + 64
+;color_offset = 256; mbuf (128) + headroom (128) = 256
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+command to run
+./build/ip_pipeline -f ./config/source_sink_port.cfg
+After the app finishes running, eth1.pcap and eth1.1.pcap should have the same size, and eth2.pcap and eth2.1.pcap should have the same size;
+in eth2.1.pcap, the first frame and the 101st frame should be identical, each being the first 64 bytes of the 1st frame in eth2.pcap.
+
+-----------------------------------------------------------
+32. Functional test for source port on rt qinq arp off pipeline
+-----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_routes = 4096
+encap = ethernet_qinq
+;ip_hdr_offset = 142; headroom (128) + ethernet header (14) = 142
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+pcap_file_rd = /root/eth1.pcap /root/eth1.pcap
+pcap_bytes_rd_per_pkt = 0 0
+;pcap_file_wr = /home/fanzhan2/eth3.pcap /home/fanzhan2/eth2.pcap
+;pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+---------------------------------------------------------
+33. Functional test for sink port on rt qinq arp off pipeline
+---------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_routes = 4096
+encap = ethernet_qinq
+;ip_hdr_offset = 142; headroom (128) + ethernet header (14) = 142
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+;pcap_file_rd = /root/eth1.pcap /root/eth1.pcap
+;pcap_bytes_rd_per_pkt = 0 0
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+----------------------------------------------------------------
+34. Functional test for source+sink port on rt qinq arp off pipeline
+----------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_routes = 4096
+encap = ethernet_qinq
+;ip_hdr_offset = 142; headroom (128) + ethernet header (14) = 142
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+pcap_file_rd = /root/eth1.pcap /root/eth1.pcap
+pcap_bytes_rd_per_pkt = 0 0
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+command to run
+./build/ip_pipeline -f ./config/source_sink_port.cfg
+After the app finishes running, eth1.pcap and eth1.1.pcap should have the same size, and eth2.pcap and eth2.1.pcap should have the same size;
+in eth2.1.pcap, the first frame and the 101st frame should be identical, each being the first 64 bytes of the 1st frame in eth2.pcap.
+
+-----------------------------------------------------------
+35. Functional test for source port on rt qinq arp on pipeline
+-----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_arp_entries = 1024
+encap = ethernet_qinq
+qinq_sched = no
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+arp_key_offset = 192; mbuf (128) + 64 = 192
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+---------------------------------------------------------
+36. Functional test for sink port on rt qinq arp on pipeline
+---------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_arp_entries = 1024
+encap = ethernet_qinq
+qinq_sched = no
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+arp_key_offset = 192; mbuf (128) + 64 = 192
+;pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+;pcap_bytes_rd_per_pkt = 64 0
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+----------------------------------------------------------------
+37. Functional test for source+sink port on rt qinq arp on pipeline
+----------------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = ROUTING
+core = s1c1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+n_arp_entries = 1024
+encap = ethernet_qinq
+qinq_sched = no
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+arp_key_offset = 192; mbuf (128) + 64 = 192
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 0 64
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 200
+command to run
+./build/ip_pipeline -f ./config/source_sink_port.cfg
+After the app finishes running, eth1.pcap and eth1.1.pcap should have the same size, and eth2.pcap and eth2.1.pcap should have the same size;
+in eth2.1.pcap, the first frame and the 101st frame should be identical, each being the first 64 bytes of the 1st frame in eth2.pcap.
+
+----------------------------------------------------------
+38. Functional test for source port on passthrough pipeline for pcap_file_rd
+----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+;pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+The app sends packets by reading the eth1.pcap and eth2.pcap files and then drops them. Check that the app runs successfully and quits
+normally; if so, the test passes.
+
+----------------------------------------------------------
+39. Functional test for source port on passthrough pipeline for pcap_file_rd and pcap_bytes_rd_per_pkt(negative)
+----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+;pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+;pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+Check that the app reports parser errors and quits; if so, the test passes, otherwise it fails.
+
+----------------------------------------------------------
+40. Functional test for source port on passthrough pipeline for pcap_bytes_rd_per_pkt only (negative)
+----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+;pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+Check that the app reports parser errors and quits; if so, the test passes, otherwise it fails.
+
+--------------------------------------------------------
+41. Functional test for sink port on passthrough pipeline for pcap_file_wr
+--------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+;pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+--------------------------------------------------------
+42. Functional test for sink port on passthrough pipeline for pcap_file_wr and pcap_n_pkt_wr
+--------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+;pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+;pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+--------------------------------------------------------
+43. Functional test for sink port on passthrough pipeline for pcap_n_pkt_wr
+--------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+;pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+----------------------------------------------------------
+44. Functional test for source port on passthrough pipeline for pcap_file_rd(negative)
+----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+;pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+Check that the app reports errors and quits normally; if so, the test passes, otherwise it fails.
+
+----------------------------------------------------------
+45. Functional test for source port on passthrough pipeline for pcap_file_rd and pcap_bytes_rd_per_pkt
+----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+Check that the app runs successfully and quits; if so, the test passes, otherwise it fails.
+
+----------------------------------------------------------
+46. Functional test for source port on passthrough pipeline for pcap_bytes_rd_per_pkt only
+----------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+;pcap_file_rd = /root/eth1.pcap /root/eth2.pcap
+pcap_bytes_rd_per_pkt = 64 0
+Command to run
+./build/ip_pipeline -f ./config/source_port.cfg
+Check that the app runs successfully and quits; if so, the test passes, otherwise it fails.
+
+--------------------------------------------------------
+47. Functional test for sink port on passthrough pipeline for pcap_file_wr(negative)
+--------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+;pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+Check that the app reports parser errors; if so, the test passes, otherwise it fails.
+
+--------------------------------------------------------
+48. Functional test for sink port on passthrough pipeline for pcap_file_wr and pcap_n_pkt_wr
+--------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+;pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+;pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+After the app finishes running, since the app sends null frames from the source port, check that the pcap file size is 0. Also check the
+time stamp at which the app finished and compare it with the last modified time of eth1.pcap and eth2.pcap; if they are the same, the test passes,
+otherwise it fails.
+
+--------------------------------------------------------
+49. Functional test for sink port on passthrough pipeline for pcap_n_pkt_wr(negative)
+--------------------------------------------------------
+config file as follows
+[PIPELINE0]
+type = MASTER
+core = 0
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = SOURCE0 SOURCE1
+pktq_out = SINK0 SINK1
+;pcap_file_wr = /root/eth1.1.pcap /root/eth2.1.pcap
+pcap_n_pkt_wr = 80 0
+Command to run
+./build/ip_pipeline -f ./config/sink_port.cfg
+Check that the app reports parser errors; if so, the test passes, otherwise it fails.
diff --git a/tests/TestSuite_ip_pipeline_config_file.py b/tests/TestSuite_ip_pipeline_config_file.py
new file mode 100644
index 0000000..ab58f46
--- /dev/null
+++ b/tests/TestSuite_ip_pipeline_config_file.py
@@ -0,0 +1,334 @@
+#BSD LICENSE
+#
+# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""
+DPDK Test suite.
+Test ip_pipeline_configuration file.
+"""
+
+import string
+import time
+import re
+import os
+from test_case import TestCase
+from plotting import Plotting
+from settings import HEADER_SIZE
+from etgen import IxiaPacketGenerator
+from packet import Packet, sniff_packets, load_sniff_packets
+
+class Test_ip_pipeline_configuration_file(TestCase):
+
+ def set_up_all(self):
+ """
+ Run at the start of each test suite.
+ """
+ self.dut_ports = self.dut.get_ports(self.nic)
+ self.verify(len(self.dut_ports) >= 4, "Insufficient ports")
+ cores = self.dut.get_core_list("1S/4C/1T")
+
+ self.dut.send_expect("chmod 777 /root/","#",30)
+ self.dut.send_expect("sed -i -e 's/CONFIG_RTE_PORT_PCAP=.*$/"
+ + "CONFIG_RTE_PORT_PCAP=y/' config/common_base", "# ", 30)
+ self.dut.build_install_dpdk(self.target)
+
+ #unpack test configuration
+ cwd = os.getcwd()
+ self.tester.send_expect("cd %s/dep/" % cwd,"# ",50)
+ result = self.tester.send_expect("tar xzvf ip_pipeline.tar.gz","# ",50)
+ self.verify("error" not in result, "ip_pipeline unpack fail ")
+
+ self.update_ip_pipeline_config()
+
+ #copy configuration file to dut and unpack
+ self.dut.session.copy_file_to("dep/ip_pipeline.tar.gz")
+ put = self.dut.send_expect("tar zxvf ~/ip_pipeline.tar.gz -C ~","#",500)
+
+ self.path = "./examples/ip_pipeline/build/ip_pipeline"
+
+ self.filepath = self.get_filepath()
+ # build sample app
+ out = self.dut.build_dpdk_apps("./examples/ip_pipeline")
+ self.verify("Error" not in out, "compilation error 1")
+ self.verify("No such file" not in out, "compilation error 2")
+
+ def set_up(self):
+ """
+ Run before each test case.
+ """
+ pass
+
+ def replace_pci(self,filename,old,new):
+ """
+ modify the file by new code
+ """
+ try:
+ lines = open(filename,'r').readlines()
+ flen = len(lines)
+ for i in range(flen):
+ if old in lines[i]:
+ lines[i] = lines[i].replace(old,new)
+ open(filename,'w').writelines(lines)
+ except Exception as e:
+ print e
+
+ def get_filepath(self):
+ """
+ get file path
+ """
+ filepatch = []
+ sourc_work_path = os.getcwd()
+ os.chdir("./dep")
+ rootdir = "ip_pipeline/"
+ for parent,dirnames,filenames in os.walk(rootdir):
+ for filename in filenames:
+ filepatch.append(os.path.join(parent,filename))
+ os.chdir(sourc_work_path)
+ return filepatch
+
+ def update_ip_pipeline_config(self):
+ """
+ modify the ip_pipeline cfg file's pci and pack again
+ """
+ po_rootdir = "dep/ip_pipeline/ip_pipeline_link_identification/"
+ for po_parent,po_dirnames,po_filenames in os.walk(po_rootdir):
+ for i in po_filenames:
+ filename = os.path.join(po_parent,i)
+ self.replace_pci(filename,"port0","%s" % self.dut.ports_info[0]['pci'])
+ self.replace_pci(filename,"port1","%s" % self.dut.ports_info[1]['pci'])
+ self.replace_pci(filename,"port2","%s" % self.dut.ports_info[2]['pci'])
+ self.replace_pci(filename,"port3","%s" % self.dut.ports_info[3]['pci'])
+ self.tester.send_expect("rm -rf ip_pipeline.tar.gz","#",10)
+ result = self.tester.send_expect("tar -czvf ip_pipeline.tar.gz ip_pipeline/","#",50)
+ self.verify("error" not in result, "ip_pipeline compression fail")
+ self.tester.send_expect("cd ../","#",10)
+
+ def scapy_send_package(self,num = 1):
+ """
+ Send a packet to port
+ """
+ txport = self.tester.get_local_port(self.dut_ports[0])
+ mac = self.dut.get_mac_address(self.dut_ports[0])
+ txItf = self.tester.get_interface(txport)
+ self.tester.scapy_append('sendp([Ether(dst="%s")/IP()/UDP()/Raw(\'X\'*18)], iface="%s",count=%s)' % (mac, txItf,num))
+ self.tester.scapy_execute()
+
+ def print_info(self,filename):
+ """
+ modify print info
+ """
+ print "\033[1;31;40m"
+ print "Test failed on %s" % filename
+ print "\033[0m"
+ def F_test_ip_pipeline_rss(self):
+ """
+ test ip_pipeline_rss
+ """
+ failcase = []
+ for filename in self.filepath:
+ if "ip_pipeline_rss/" in filename:
+ cmd = self.path + " -p 0x3 -f ~/%s" % filename
+ self.dut.send_expect(cmd,"[PIPELINE1]",120)
+ out = self.dut.send_expect("quit","# ",10)
+ if "Bye" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ self.verify(len(failcase) == 0, "Failed cfg files: %s" % failcase)
+
+ def test_ip_pipeline_cfg(self):
+ """
+ according to the cfg to run ip_pipeline
+ check them build successful
+ """
+ failcase = []
+ for filename in self.filepath:
+ if "ip_pipeline_cfg/postive" in filename:
+ cmd = self.path + " -p 0x3 -f ~/%s" % filename
+ self.dut.send_expect(cmd,"[PIPELINE1]",120)
+ out = self.dut.send_expect("quit","# ",10)
+ if "Bye" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ elif "ip_pipeline_cfg/negative" in filename:
+ cmd = self.path + " -p 0x3 -f ~/%s" % filename
+ out = self.dut.send_expect(cmd,"# ",60)
+ if "error" not in out:
+ print out
+ self.print_info(filename)
+ failcase.append(filename)
+ self.dut.send_expect("quit","# ",10)
+ self.verify(len(failcase) == 0, "Failed cfg files: %s" % failcase)
+
+ def test_ip_pipeline_cpu_utilization(self):
+ """
+ according to the cfg to run ip_pipeline
+ send packet
+ check them receive successful
+ """
+ self.dut.send_expect("cd examples/ip_pipeline","#",50)
+ self.dut.send_expect("sed -i -e 's/pool_size = .*$/"
+ + "pool_size = 4096/' config/edge_router_downstream.cfg", "# ", 30)
+ cmd = "./build/ip_pipeline -p 0xf -f config/edge_router_downstream.cfg -s config/edge_router_downstream.sh"
+ out = self.dut.send_expect(cmd,"#p 1 route ls",50)
+ self.scapy_send_package(1000)
+ self.dut.send_expect("","pipeline> ",5)
+ out = self.dut.send_expect("t 1 headroom","pipeline> ",10)
+ self.dut.send_expect("quit","# ",10)
+ print out
+ # Extract the headroom percentage from the output (assumes a value like "NN.N%" is reported) and require it to exceed 50%.
+ headroom = re.search(r"(\d+(?:\.\d+)?)\s*%", out)
+ self.verify(headroom is not None and float(headroom.group(1)) > 50, "headroom error")
+ self.dut.send_expect("cd -","#",50)
+
+ def test_ip_pipeline_link_identification(self):
+ """
+ according to the cfg to run ip_pipeline
+ check them build successful
+ """
+ failcase = []
+ for filename in self.filepath:
+ if "ip_pipeline_link_identification/postive" in filename:
+ if "_f" in filename:
+ cmd = self.path + " -f ~/%s" % filename
+ self.dut.send_expect(cmd,"[PIPELINE1]",50)
+ out = self.dut.send_expect("quit","# ",10)
+ if "Bye!" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ elif "_pf" in filename:
+ cmd = self.path + " -p f -f ~/%s" % filename
+ self.dut.send_expect(cmd,"[PIPELINE1]",50)
+ out = self.dut.send_expect("quit","# ",10)
+ if "Bye!" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ elif "1p" in filename:
+ cmd = self.path + " -p 1 -f ~/%s" % filename
+ self.dut.send_expect(cmd,"[PIPELINE1]",50)
+ out = self.dut.send_expect("quit","# ",10)
+ if "Bye!" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ elif "ip_pipeline_link_identification/negative" in filename:
+ if "_f" in filename:
+ cmd = self.path + " -f ~/%s" % filename
+ out = self.dut.send_expect(cmd,"# ",50)
+ if "error" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ self.dut.send_expect("quit","# ",10)
+ elif "_1p" in filename:
+ cmd = self.path + " -p 1 -f ~/%s" % filename
+ out = self.dut.send_expect(cmd,"# ",50)
+ if "error" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ self.dut.send_expect("quit","# ",10)
+ elif "_3p" in filename:
+ cmd = self.path + " -p 3 -f ~/%s" % filename
+ out = self.dut.send_expect(cmd,"#",50)
+ if "error" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ self.dut.send_expect("quit","# ",10)
+ self.verify(len(failcase) == 0, "Failed cfg files: %s" % failcase)
+
+ def test_ip_pipeline_parser_cleanup(self):
+ """
+ according to the cfg to run ip_pipeline
+ check them build successful
+ """
+ failcase = []
+ for filename in self.filepath:
+ if "ip_pipeline_parser_cleanup/postive" in filename:
+ cmd = self.path + " -p 0x3 -f ~/%s" % filename
+ self.dut.send_expect(cmd,"[PIPELINE1]",50)
+ out = self.dut.send_expect("quit","# ",10)
+ if "Bye!" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ elif "ip_pipeline_parser_cleanup/negative" in filename:
+ cmd = self.path + " -p 0x3 -f ~/%s" % filename
+ out = self.dut.send_expect(cmd,"# ",50)
+ if "error" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ self.dut.send_expect("quit","# ",10)
+ self.verify(len(failcase) == 0, "Failed cfg files: %s" % failcase)
+
+ def test_ip_pipeline_source_sink(self):
+ """
+ according to the cfg to run ip_pipeline
+ check them build successful
+ """
+ failcase = []
+ for filename in self.filepath:
+ if "ip_pipeline_source_sink/postive" in filename:
+ self.dut.send_expect("cp ~/ip_pipeline/*.pcap ~","#",50)
+ if "_com" in filename:
+ cmd = self.path + " -f ~/%s" % filename
+ self.dut.send_expect(cmd,"PORT: Dumped",50)
+ time.sleep(10)
+ out = self.dut.send_expect("quit","# ",10)
+ if "Bye!" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ else:
+ cmd = self.path + " -f ~/%s" % filename
+ self.dut.send_expect(cmd,"[PIPELINE1]",50)
+ out = self.dut.send_expect("quit","# ",10)
+ if "Bye!" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ self.dut.send_expect("rm -rf ~/*.pcap","#",50)
+ elif "ip_pipeline_source_sink/negative" in filename:
+ self.dut.send_expect("cp ~/ip_pipeline/*.pcap ~","#",50)
+ cmd = self.path + " -f ~/%s" % filename
+ out = self.dut.send_expect(cmd,"# ",50)
+ if "error" not in out:
+ self.print_info(filename)
+ failcase.append(filename)
+ self.dut.send_expect("quit","# ",10)
+ self.dut.send_expect("rm -rf ~/*.pcap","#",50)
+ self.verify(len(failcase) == 0, "Failed cfg files: %s" % failcase)
+
+ def tear_down(self):
+ """
+ Run after each test case.
+ """
+
+ self.dut.kill_all()
+ time.sleep(2)
+
+ def tear_down_all(self):
+ """
+ Run after each test suite.
+ """
+ self.dut.send_expect("sed -i -e 's/CONFIG_RTE_PORT_PCAP=.*$/"
+ + "CONFIG_RTE_PORT_PCAP=n/' config/common_base", "# ", 30)
+ self.dut.build_install_dpdk(self.target)
+
--
1.9.3
^ permalink raw reply [flat|nested] 5+ messages in thread
* [dts] [PATCH V2] add Test Suite l2fwd-jobstats
2016-11-22 6:39 [dts] [PATCH V2] add test suite hotplug xu,gang
2016-11-22 6:39 ` [dts] [PATCH V2] add Test Suite ip_pipeline_config_file xu,gang
@ 2016-11-22 6:39 ` xu,gang
2016-11-22 6:39 ` [dts] [PATCH V2] add test suite packet ordering xu,gang
2016-11-23 5:05 ` [dts] [PATCH V2] add test suite hotplug Liu, Yong
3 siblings, 0 replies; 5+ messages in thread
From: xu,gang @ 2016-11-22 6:39 UTC (permalink / raw)
To: dts; +Cc: xu,gang
Signed-off-by: xu,gang <gangx.xu@intel.com>
---
test_plans/l2fwd-jobstats_test_plan.rst | 87 ++++++++++++++++++++++++
tests/TestSuite_l2fwd-jobstats.py | 115 ++++++++++++++++++++++++++++++++
2 files changed, 202 insertions(+)
create mode 100644 test_plans/l2fwd-jobstats_test_plan.rst
create mode 100644 tests/TestSuite_l2fwd-jobstats.py
diff --git a/test_plans/l2fwd-jobstats_test_plan.rst b/test_plans/l2fwd-jobstats_test_plan.rst
new file mode 100644
index 0000000..75c0cc5
--- /dev/null
+++ b/test_plans/l2fwd-jobstats_test_plan.rst
@@ -0,0 +1,87 @@
+.. BSD LICENSE
+ Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+L2 Forwarding Sample Application (in Real and Virtualized Environments) with core load statistics.
+==================================================================================================
+
+The L2 Forwarding sample application is a simple example of packet processing using
+the Data Plane Development Kit (DPDK) which
+also takes advantage of Single Root I/O Virtualization (SR-IOV) features in a virtualized environment.
+
+.. note::
+
+ This application is a variation of the L2 Forwarding sample application. It demonstrates a
+ possible scheme of job stats library usage, therefore some parts of this document are identical
+ to the original L2 Forwarding documentation.
+
+Overview
+--------
+
+The L2 Forwarding sample application, which can operate in real and virtualized environments,
+performs L2 forwarding for each packet that is received.
+The destination port is the adjacent port from the enabled portmask, that is,
+if the first four ports are enabled (portmask 0xf),
+ports 1 and 2 forward into each other, and ports 3 and 4 forward into each other.
+Also, the MAC addresses are affected as follows:
+
+* The source MAC address is replaced by the TX port MAC address
+
+* The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID, as illustrated in the sketch below
+
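+A minimal sketch of how a verification script could compute these expected
+addresses (the helper name and its arguments are ours, not part of the sample
+application or the test suite):
+
+.. code-block:: python
+
+    def expected_l2fwd_macs(tx_port_id, tx_port_mac):
+        # source MAC: the TX port's own MAC address
+        src = tx_port_mac
+        # destination MAC: the TX port id encoded in the last byte
+        dst = "02:00:00:00:00:%02x" % tx_port_id
+        return src, dst
+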
+Running the Application
+-----------------------
+
+The application requires a number of command line options:
+
+.. code-block:: console
+
+ ./build/l2fwd-jobstats [EAL options] -- -p PORTMASK [-q NQ] [-l]
+
+where,
+
+* p PORTMASK: A hexadecimal bitmask of the ports to configure
+
+* q NQ: A number of queues (=ports) per lcore (default is 1)
+
+* l: Use locale thousands separator when formatting big numbers.
+
+To run the application in linuxapp environment with 4 lcores, 16 ports, 8 RX queues per lcore and
+thousands separator printing, issue the command:
+
+.. code-block:: console
+
+ $ ./build/l2fwd-jobstats -c f -n 4 -- -q 8 -p ffff -l
+
+Refer to the *DPDK Getting Started Guide* for general information on running applications
+and the Environment Abstraction Layer (EAL) options.
+
+Check
+-----
+Send a packet to each DUT port and check that it is received and forwarded
+successfully; a minimal sketch of this check follows.
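+
+The sketch below assumes scapy is available on the tester; eth_if and dut_mac
+stand for the tester interface and the DUT port MAC address and are placeholders,
+not names used by the suite:
+
+.. code-block:: python
+
+    from scapy.all import Ether, IP, UDP, sendp
+
+    def send_probe(eth_if, dut_mac, count=1):
+        # craft a plain UDP frame addressed to the DUT port under test
+        pkt = Ether(dst=dut_mac) / IP() / UDP(sport=1024, dport=1024)
+        sendp(pkt, iface=eth_if, count=count)
+
+    # after sending, the l2fwd-jobstats console statistics are parsed and the
+    # receive/forward counters are expected to grow by count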
diff --git a/tests/TestSuite_l2fwd-jobstats.py b/tests/TestSuite_l2fwd-jobstats.py
new file mode 100644
index 0000000..c514af9
--- /dev/null
+++ b/tests/TestSuite_l2fwd-jobstats.py
@@ -0,0 +1,115 @@
+#BSD LICENSE
+#
+# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""
+DPDK Test suite.
+Test l2fwd-jobstats
+"""
+
+import string
+import time
+import re
+import utils
+from test_case import TestCase
+from plotting import Plotting
+from settings import HEADER_SIZE
+from etgen import IxiaPacketGenerator
+from packet import Packet, sniff_packets, load_sniff_packets
+
+class TestL2fwdJobstats(TestCase):
+
+ def set_up_all(self):
+ """
+ Run at the start of each test suite.
+ """
+
+ self.dut_ports = self.dut.get_ports(self.nic)
+ self.verify(len(self.dut_ports) >= 2, "Insufficient ports")
+ cores = self.dut.get_core_list("1S/4C/1T")
+ self.coremask = utils.create_mask(cores)
+ self.portmask = utils.create_mask(self.dut_ports)
+ self.path = "./examples/l2fwd-jobstats/build/l2fwd-jobstats"
+
+ # build sample app
+ out = self.dut.build_dpdk_apps("./examples/l2fwd-jobstats")
+ self.verify("Error" not in out, "compilation error 1")
+ self.verify("No such file" not in out, "compilation error 2")
+
+ def set_up(self):
+ """
+ Run before each test case.
+ """
+ pass
+
+ def test_l2fwd_jobstats(self):
+ """
+ Verify that l2fwd-jobstats forwards packets and reports port statistics
+ """
+ cmd = self.path + " -c %s -n %d -- -q 8 -p %s -l" % (self.coremask, self.dut.get_memory_channels(), self.portmask)
+
+ # start l2fwd-jobstats on all ports and wait for the statistics output
+ self.dut.send_expect(cmd,"Port statistics",60)
+
+ self.scapy_send_packet()
+ out = self.dut.get_session_output(timeout=10)
+ p = re.compile(r'\d+')
+ result = p.findall(out)
+ amount = 1 * len(self.dut_ports)
+ self.verify(str(amount) in result, "Wrong: did not receive the expected %d packets" % amount)
+
+ def scapy_send_packet(self):
+ """
+ Send a packet to port
+ """
+ for i in range(len(self.dut_ports)):
+ self.dmac = self.dut.get_mac_address(self.dut_ports[i])
+ txport = self.tester.get_local_port(self.dut_ports[i])
+ self.txItf = self.tester.get_interface(txport)
+ pkt = Packet(pkt_type='UDP')
+ pkt.config_layer('ether', {'dst': self.dmac,})
+ pkt.send_pkt(tx_port=self.txItf)
+
+
+ def tear_down(self):
+ """
+ Run after each test case.
+ """
+ self.dut.kill_all()
+ time.sleep(2)
+ pass
+
+ def tear_down_all(self):
+ """
+ Run after each test suite.
+ """
+ pass
+
--
1.9.3
^ permalink raw reply [flat|nested] 5+ messages in thread
* [dts] [PATCH V2] add test suite packet ordering
2016-11-22 6:39 [dts] [PATCH V2] add test suite hotplug xu,gang
2016-11-22 6:39 ` [dts] [PATCH V2] add Test Suite ip_pipeline_config_file xu,gang
2016-11-22 6:39 ` [dts] [PATCH V2] add Test Suite l2fwd-jobstats xu,gang
@ 2016-11-22 6:39 ` xu,gang
2016-11-23 5:05 ` [dts] [PATCH V2] add test suite hotplug Liu, Yong
3 siblings, 0 replies; 5+ messages in thread
From: xu,gang @ 2016-11-22 6:39 UTC (permalink / raw)
To: dts; +Cc: xu,gang
Signed-off-by: xu,gang <gangx.xu@intel.com>
---
test_plans/packet_ordering_test_plan.rst | 75 ++++++++++++++++++++
tests/TestSuite_packet_ordering.py | 114 +++++++++++++++++++++++++++++++
2 files changed, 189 insertions(+)
create mode 100644 test_plans/packet_ordering_test_plan.rst
create mode 100644 tests/TestSuite_packet_ordering.py
diff --git a/test_plans/packet_ordering_test_plan.rst b/test_plans/packet_ordering_test_plan.rst
new file mode 100644
index 0000000..efe3304
--- /dev/null
+++ b/test_plans/packet_ordering_test_plan.rst
@@ -0,0 +1,75 @@
+.. BSD LICENSE
+ Copyright(c) 2015 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Packet Ordering Application
+============================
+
+The Packet Ordering sample app simply shows the impact of reordering a stream.
+It's meant to stress the library with different configurations for performance.
+
+Overview
+--------
+
+The application uses at least three CPU cores:
+
+* RX core (master core) receives traffic from the NIC ports and feeds Worker
+ cores with traffic through SW queues.
+
+* Worker core (slave core) basically does some light work on the packet.
+ Currently it modifies the output port of the packet for configurations with
+ more than one port enabled.
+
+* TX Core (slave core) receives traffic from Worker cores through software queues,
+ inserts out-of-order packets into reorder buffer, extracts ordered packets
+ from the reorder buffer and sends them to the NIC ports for transmission.
+Case: test_packet_ordering
+--------------------------
+
+The application execution command line is:
+
+.. code-block:: console
+
+ ./packet_ordering [EAL options] -- -p PORTMASK [--disable-reorder]
+
+The -c EAL CPU_COREMASK option has to contain at least 3 CPU cores.
+The first CPU core in the core mask is the master core and is assigned to the
+RX core, the last to the TX core, and the rest to Worker cores; a sketch of how
+such a mask can be built follows.
+
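+A minimal sketch of building the -c mask from a list of core ids (our own helper,
+equivalent in spirit to the utils.create_mask() helper used by the test suite):
+
+.. code-block:: python
+
+    def coremask(cores):
+        # e.g. coremask([0, 1, 2]) -> "0x7", enough for RX, TX and one Worker core
+        mask = 0
+        for core_id in cores:
+            mask |= 1 << core_id
+        return hex(mask)
+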
+The PORTMASK parameter must enable either a single port or an even number of ports.
+When more than one port is enabled, traffic is forwarded in pairs.
+For example, if we enable 4 ports, traffic goes from port 0 to 1 and from 1 to 0,
+and likewise from 2 to 3 and from 3 to 2, giving the [0,1] and [2,3] pairs
+(see the sketch below).
+
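+A minimal sketch of the pairing rule, useful when deciding on which tester port the
+forwarded traffic should be captured (our own helper, not part of the sample code):
+
+.. code-block:: python
+
+    def paired_port(port_id):
+        # ports forward in adjacent pairs: 0 <-> 1, 2 <-> 3, ...
+        return port_id ^ 1
+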
+The --disable-reorder long option disables, as its name implies, the reordering
+of traffic, which helps evaluate the performance impact of reordering.
+
+Check
+-----
+Change the number of CPU cores and check that the application keeps receiving and
+forwarding packets normally; a sketch of an ordering check follows.
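+
+A minimal sketch of such a check on the tester side, assuming scapy is used to send
+sequence-numbered payloads and capture the forwarded stream (interface names and the
+helper itself are placeholders, not part of the suite):
+
+.. code-block:: python
+
+    from scapy.all import Ether, IP, UDP, Raw, sendp, sniff
+
+    def check_order(tx_if, rx_if, dut_mac, n=100):
+        # send n frames whose payload carries an increasing sequence number
+        pkts = [Ether(dst=dut_mac) / IP() / UDP() / Raw(load=str(i)) for i in range(n)]
+        sendp(pkts, iface=tx_if)
+        # capture the forwarded stream (in a real test the capture is started
+        # before sending; kept sequential here for brevity)
+        cap = sniff(iface=rx_if, timeout=10)
+        seq = [int(p[Raw].load) for p in cap if Raw in p]
+        # with reordering enabled the sequence numbers must come back sorted
+        return seq == sorted(seq)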
diff --git a/tests/TestSuite_packet_ordering.py b/tests/TestSuite_packet_ordering.py
new file mode 100644
index 0000000..3793818
--- /dev/null
+++ b/tests/TestSuite_packet_ordering.py
@@ -0,0 +1,114 @@
+#BSD LICENSE
+#
+# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""
+DPDK Test suite.
+Test packet ordering.
+"""
+
+import string
+import time
+import re
+from test_case import TestCase
+from plotting import Plotting
+from settings import HEADER_SIZE
+from etgen import IxiaPacketGenerator
+from packet import Packet, sniff_packets, load_sniff_packets
+import utils
+
+class TestPacketOrdering(TestCase):
+
+ def set_up_all(self):
+ """
+ Run at the start of each test suite.
+ """
+ self.dut_ports = self.dut.get_ports(self.nic)
+ self.verify(len(self.dut_ports) >= 2, "Insufficient ports")
+ self.path = "./examples/packet_ordering/build/packet_ordering"
+ self.portmask = utils.create_mask(self.dut_ports)
+
+ # build sample app
+ out = self.dut.build_dpdk_apps("./examples/packet_ordering")
+ self.verify("Error" not in out, "compilation error 1")
+ self.verify("No such file" not in out, "compilation error 2")
+
+ def set_up(self):
+ """
+ Run before each test case.
+ """
+ pass
+
+
+ def test_packet_ordering(self):
+ """
+ test packet ordering with different numbers of CPU cores
+ """
+ # with fewer than 3 cores the application must refuse to start
+ for i in range(1,3):
+ cores = self.dut.get_core_list("1S/%sC/1T" % i)
+ coremask = utils.create_mask(cores)
+ cmd = self.path + " -c %s -n %d -- -p %s" % (coremask, self.dut.get_memory_channels(), self.portmask)
+ out = self.dut.send_expect(cmd,"# ",60)
+ self.verify("Error" in out, "Wrong: no error reported with less than 3 cores")
+ # with 3 or more cores the application must start and forward the test packet
+ for i in range(3,19):
+ cores = self.dut.get_core_list("1S/%sC/1T" % i)
+ coremask = utils.create_mask(cores)
+ cmd = self.path + " -c %s -n %d -- -p %s" % (coremask, self.dut.get_memory_channels(), self.portmask)
+ self.dut.send_expect(cmd,"send_thread()",60)
+ self.scapy_send_package()
+ out = self.dut.send_expect("^C","# ",50)
+ packet = re.search(" - Pkts rxd:\s*(\d*)",out)
+ sum_packet = packet.group(1)
+ self.verify("1" == sum_packet, "Wrong: received packet count is not 1")
+
+ def scapy_send_package(self):
+ """
+ Send a packet to port
+ """
+ self.dmac = self.dut.get_mac_address(self.dut_ports[0])
+ txport = self.tester.get_local_port(self.dut_ports[0])
+ self.txItf = self.tester.get_interface(txport)
+ pkt = Packet(pkt_type='UDP')
+ pkt.config_layer('ether', {'dst': self.dmac,})
+ pkt.send_pkt(tx_port=self.txItf)
+
+ def tear_down(self):
+ """
+ Run after each test case.
+ """
+ pass
+
+ def tear_down_all(self):
+ """
+ Run after each test suite.
+ """
+ self.dut.kill_all()
+ time.sleep(2)
+
--
1.9.3
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dts] [PATCH V2] add test suite hotplug
2016-11-22 6:39 [dts] [PATCH V2] add test suite hotplug xu,gang
` (2 preceding siblings ...)
2016-11-22 6:39 ` [dts] [PATCH V2] add test suite packet ordering xu,gang
@ 2016-11-23 5:05 ` Liu, Yong
3 siblings, 0 replies; 5+ messages in thread
From: Liu, Yong @ 2016-11-23 5:05 UTC (permalink / raw)
To: xu,gang, dts
Thanks, applied.
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2016-11-23 5:06 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-11-22 6:39 [dts] [PATCH V2] add test suite hotplug xu,gang
2016-11-22 6:39 ` [dts] [PATCH V2] add Test Suite ip_pipeline_config_file xu,gang
2016-11-22 6:39 ` [dts] [PATCH V2] add Test Suite l2fwd-jobstats xu,gang
2016-11-22 6:39 ` [dts] [PATCH V2] add test suite packet ordering xu,gang
2016-11-23 5:05 ` [dts] [PATCH V2] add test suite hotplug Liu, Yong