From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by dpdk.org (Postfix) with ESMTP id A8C0E2C6E for ; Thu, 13 Apr 2017 08:25:06 +0200 (CEST) Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 12 Apr 2017 23:25:05 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.37,193,1488873600"; d="scan'208";a="88560278" Received: from stv-crb-08.sh.intel.com (HELO dpdk-test60.sh.intel.com) ([10.239.128.48]) by fmsmga006.fm.intel.com with ESMTP; 12 Apr 2017 23:25:04 -0700 From: Xueqin Lin To: dts@dpdk.org Cc: Xueqin Lin Date: Wed, 12 Apr 2017 20:21:17 -0400 Message-Id: <1492042877-100698-1-git-send-email-xueqin.lin@intel.com> X-Mailer: git-send-email 2.5.5 Subject: [dts] [PATCH] add ddp mpls test suite and profile file X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Thu, 13 Apr 2017 06:25:07 -0000 --- dep/mpls.pkgo | Bin 0 -> 1200 bytes tests/TestSuite_ddp_mpls.py | 239 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 239 insertions(+) create mode 100644 dep/mpls.pkgo create mode 100644 tests/TestSuite_ddp_mpls.py diff --git a/dep/mpls.pkgo b/dep/mpls.pkgo new file mode 100644 index 0000000000000000000000000000000000000000..0676cabea83e9e862bdfee8fa08ff687e0fcfd15 GIT binary patch literal 1200 zcmah}&59F25U!p{!V(nXK~Ewrf)`mLn)q{(=q6c&i4NIpK!T7?(%B3;nV{gc$CjvSaNhVazgMj-{@u?>#v>_)v47jA@ zkWDenjP3b?jfz{61tS)YsM~quHX>TRy?*N^okx761BHAhLg@uN=rG(xceyvioYNDX zG3V)HQJYtD`vzYdJTQ1@ z@QuN@2Gw03)BsdJ{6M{mm-7oXJ>6oV`(X{Bg!UQ|0>pyD%aB8^MDh4tmRsaw@jM)% zenKT+kyXc;a4MWXcG|xLcar6bqwxa7N6AFuZ$qz?@h3O5@%YkL3+;J; zGJH<`krzPqw-U75K&txt`1dKyM9Z40r%KVbbHJa;kPW47ZR)cD_y0Aqz*j`2pv`Wt zQdzH5A1ChVxXK@&;|hNprYzc}Vr~-XbR0VHWP~q8O!dauT~+?5I{!X?+p7HJVO#w# 
# DPDK Test suite: DDP (Dynamic Device Personalization) MPLS.
#
# Loads the mpls.pkgo profile into a Fortville (i40e) NIC at runtime and
# verifies that MPLS-in-UDP / MPLS-in-GRE packets are steered to the queue
# selected by an rte_flow rule, both on the PF and on a VF passed through
# to a QEMU guest.

import time
import sys
import utils
from scapy.utils import rdpcap

from qemu_kvm import QEMUKvm
from test_case import TestCase
from pmd_output import PmdOutput
from packet import Packet, sniff_packets, load_sniff_packets
from settings import get_nic_name
import random

# Core mask handed to the testpmd instance inside the VM.
VM_CORES_MASK = 'all'
# Queue counts: the PF exposes 64 queues, each VF only 4.
PF_MAX_QUEUE = 64
VF_MAX_QUEUE = 4


class Testddp_mpls(TestCase):

    def set_up_all(self):
        """
        Run once before the suite: require a supported NIC with at least
        one port and copy the DDP profile onto the DUT.
        """
        self.verify(self.nic in ['fortville_25g'],
                    'ddp mpls can not support %s nic' % self.nic)
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(len(self.dut_ports) >= 1, "Insufficient ports")
        self.vm0 = None
        self.env_done = False
        profile_file = r'dep/mpls.pkgo'
        profile_dst = "/tmp/"
        self.dut.session.copy_file_to(profile_file, profile_dst)

    def set_up(self):
        """Run before each test case: (re)create the PF/VF/VM environment."""
        self.setup_vm_env()

    def bind_nic_driver(self, ports, driver=""):
        """
        Bind every port in *ports* to *driver*.

        An empty *driver* means "each port's own default driver".
        """
        if driver == "igb_uio":
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                # Fix: the original assigned the current driver back into
                # the `driver` parameter, shadowing the requested value.
                current = netdev.get_nic_driver()
                if current != 'igb_uio':
                    netdev.bind_driver(driver='igb_uio')
        else:
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                current = netdev.get_nic_driver()
                # Fix: resolve the default per port instead of overwriting
                # `driver` on the first iteration, which previously leaked
                # port 0's default driver onto every later port.
                target = driver if driver != "" else netdev.default_driver
                if target != current:
                    netdev.bind_driver(driver=target)

    def setup_vm_env(self, driver='igb_uio'):
        """
        Create testing environment with VF generated from 1PF: bind the PF
        to igb_uio, spawn one VF, pass it to VM0 via pci-assign, then start
        testpmd on both the PF (host) and the VF (guest).
        """
        if self.env_done == False:
            self.bind_nic_driver(self.dut_ports[:1], driver="igb_uio")
            self.used_dut_port = self.dut_ports[0]
            tester_port = self.tester.get_local_port(self.used_dut_port)
            self.tester_intf = self.tester.get_interface(tester_port)

            self.dut.generate_sriov_vfs_by_port(
                self.used_dut_port, 1, driver=driver)
            self.sriov_vfs_port = self.dut.ports_info[
                self.used_dut_port]['vfs_port']
            for port in self.sriov_vfs_port:
                # pci-stub detaches the VF from the host so QEMU can own it.
                port.bind_driver('pci-stub')
            time.sleep(1)
            self.dut_testpmd = PmdOutput(self.dut)
            time.sleep(1)
            vf0_prop = {'opt_host': self.sriov_vfs_port[0].pci}

            # set up VM0 ENV
            self.vm0 = QEMUKvm(self.dut, 'vm0', 'ddp_mpls')
            self.vm0.set_vm_device(driver='pci-assign', **vf0_prop)
            try:
                self.vm0_dut = self.vm0.start()
                if self.vm0_dut is None:
                    raise Exception("Set up VM0 ENV failed!")
            except Exception as e:
                # Tear down whatever was created before re-raising so a
                # failed setup does not leak VFs or a half-started VM.
                self.destroy_vm_env()
                raise Exception(e)

            self.vm0_dut_ports = self.vm0_dut.get_ports('any')
            self.vm0_testpmd = PmdOutput(self.vm0_dut)
            self.env_done = True

        self.dut_testpmd.start_testpmd(
            "Default", "--port-topology=chained --txq=%s --rxq=%s"
            % (PF_MAX_QUEUE, PF_MAX_QUEUE))
        self.vm0_testpmd.start_testpmd(
            VM_CORES_MASK, "--port-topology=chained --txq=%s --rxq=%s"
            % (VF_MAX_QUEUE, VF_MAX_QUEUE))

    def destroy_vm_env(self):
        """Stop VM0 and release the VFs created by setup_vm_env()."""
        if getattr(self, 'vm0', None):
            self.vm0_dut.kill_all()
            self.vm0_testpmd = None
            self.vm0_dut_ports = None
            # destroy vm0
            self.vm0.stop()
            self.vm0 = None

        if getattr(self, 'used_dut_port', None):
            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
            self.used_dut_port = None

        self.env_done = False

    def load_profile(self):
        """
        Load profile to update FVL configuration tables, profile will be
        stored in binary file and need to be passed to AQ to program FVL
        during initialization stage.
        """
        self.dut_testpmd.execute_cmd('port stop all')
        time.sleep(1)
        # Sanity-check that no profile is loaded yet, then add ours.
        out = self.dut_testpmd.execute_cmd('ddp get list 0')
        self.verify("Profile number is: 0" in out,
                    "Failed to get ddp profile info list!!!")
        self.dut_testpmd.execute_cmd('ddp add 0 /tmp/mpls.pkgo')
        out = self.dut_testpmd.execute_cmd('ddp get list 0')
        self.verify("Profile number is: 1" in out,
                    "Failed to load ddp profile!!!")
        self.dut_testpmd.execute_cmd('port start all')
        time.sleep(1)

    def mpls_test(self, port='pf', pkt='udp'):
        """
        Send mpls packet to dut, receive packet from configured queue.

        :param port: flow-rule action target, 'pf' or 'vf id 0'.
        :param pkt:  encapsulation, 'udp' or 'gre'.

        First pass sends packets whose MPLS label matches the flow rule and
        expects them on the chosen queue; second pass uses a mismatching
        label and expects the packets on default queue 0. Both passes also
        require a good L4 checksum flag.
        """
        pkts = []
        if port == 'pf':
            queue = random.randint(1, PF_MAX_QUEUE - 1)
            self.dut_testpmd.execute_cmd('set fwd rxonly')
            self.dut_testpmd.execute_cmd('set verbose 1')
            self.dut_testpmd.execute_cmd('start')
        else:
            queue = random.randint(1, VF_MAX_QUEUE - 1)
            self.vm0_testpmd.execute_cmd('set fwd rxonly')
            self.vm0_testpmd.execute_cmd('set verbose 1')
            self.vm0_testpmd.execute_cmd('start')
        random_label = random.randint(0x0, 0xFFFFF)
        label = hex(random_label)
        # A label guaranteed to differ from `label` (offset 2, mod 20 bits).
        wrong_label = hex((random_label + 2) % 0xFFFFF)
        self.dut_testpmd.execute_cmd('flow create 0 ingress pattern eth / ipv4\
            / %s / mpls label is %s / end actions %s / queue index %d / end'
            % (pkt, label, port, queue))
        for times in range(2):
            if pkt == 'udp':
                pkts = {'mpls/good chksum udp': 'Ether()/IP()/UDP(dport=6635)\
                    /MPLS(label=%s)/Ether()/IP()/TCP()' % label,
                        'mpls/bad chksum udp': 'Ether()/IP()/UDP(chksum=0x1234,\
                    dport=6635)/MPLS(label=%s)/Ether()/IP()/TCP()' % label}
            else:
                pkts = {'mpls/good chksum gre': 'Ether()/IP(proto=47)/GRE(proto=0x8847)\
                    /MPLS(label=%s)/Ether()/IP()/UDP()' % label,
                        'mpls/bad chksum gre': 'Ether()/IP(proto=47)/GRE(chksum=0x1234,\
                    proto=0x8847)/MPLS(label=%s)/Ether()/IP()/UDP()' % label}
            for packet_type in pkts.keys():
                self.tester.scapy_append('sendp([%s], iface="%s")'
                                         % (pkts[packet_type], self.tester_intf))
                self.tester.scapy_execute()
                if port == 'pf':
                    out = self.dut.get_session_output(timeout=2)
                else:
                    out = self.vm0_dut.get_session_output(timeout=2)

                self.verify("port 0/queue %d" % queue in out,
                            "Failed to receive packet in this queue!!!")
                self.verify("PKT_RX_L4_CKSUM_GOOD" in out, "Failed to check CKSUM!!!")
            # Second pass: mismatching label must land on default queue 0.
            label = wrong_label
            queue = 0

    def test_load_ddp(self):
        """
        Load profile to update FVL configuration tables.
        """
        self.load_profile()

    def test_mpls_udp_pf(self):
        """
        MPLS is supported by NVM with profile updated. Send mpls udp packet to PF,
        check PF could receive packet using configured queue, checksum is good.
        """
        self.load_profile()
        self.mpls_test(port='pf', pkt='udp')

    def test_mpls_gre_pf(self):
        """
        MPLS is supported by NVM with profile updated. Send mpls gre packet to PF,
        check PF could receive packet using configured queue, checksum is good.
        """
        self.load_profile()
        self.mpls_test(port='pf', pkt='gre')

    def test_mpls_udp_vf(self):
        """
        MPLS is supported by NVM with profile updated. Send mpls udp packet to VF,
        check VF could receive packet using configured queue, checksum is good.
        """
        self.load_profile()
        self.mpls_test(port='vf id 0', pkt='udp')

    def test_mpls_gre_vf(self):
        """
        MPLS is supported by NVM with profile updated. Send mpls gre packet to VF,
        check VF could receive packet using configured queue, checksum is good.
        """
        self.load_profile()
        self.mpls_test(port='vf id 0', pkt='gre')

    def tear_down(self):
        """Run after each test case: reset the PF and quit both testpmds."""
        # Fix: guard with getattr — if setup_vm_env() raised before
        # assigning vm0_testpmd, the original raised AttributeError here.
        if getattr(self, 'vm0_testpmd', None):
            # NOTE(review): these register writes presumably clear the
            # loaded DDP profile state between cases — confirm against the
            # FVL datasheet.
            self.dut_testpmd.execute_cmd('write reg 0 0xb8190 1')
            self.dut_testpmd.execute_cmd('write reg 0 0xb8190 2')
            self.vm0_testpmd.quit()
            self.dut_testpmd.quit()
            self.vm0_dut.kill_all()

    def tear_down_all(self):
        """Run once after the suite: destroy the VM/VF environment."""
        self.destroy_vm_env()