From: Phil Yang <phil.yang@arm.com>
To: dts@dpdk.org
Cc: nd@arm.com, Jianbo.Liu@arm.com, Herber.Guan@arm.com,
huilongx.xu@intel.com, phil.yang@arm.com
Subject: [dts] [PATCH v2] tests/hotplug: fix some failure cases
Date: Wed, 18 Oct 2017 16:41:35 +0800 [thread overview]
Message-ID: <1508316095-16282-1-git-send-email-phil.yang@arm.com> (raw)
In-Reply-To: <1508149361-7628-1-git-send-email-phil.yang@arm.com>
1. Update testpmd path for multiarch.
2. Fix port binding timeout in tear-down process.
3. Add vfio-pci and vfio-pci:noiommu support.
4. Clear packet stats before sending packets to avoid test errors caused by
broadcast packets.
Signed-off-by: Phil Yang <phil.yang@arm.com>
---
tests/TestSuite_hotplug.py | 23 +++++++++++++++--------
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/tests/TestSuite_hotplug.py b/tests/TestSuite_hotplug.py
index 7704253..c0e1741 100644
--- a/tests/TestSuite_hotplug.py
+++ b/tests/TestSuite_hotplug.py
@@ -47,7 +47,7 @@ from packet import Packet, sniff_packets, load_sniff_packets
class TestPortHotPlug(TestCase):
"""
- This feature only supports igb_uio now and not support freebsd
+ This feature supports igb_uio, vfio-pci and vfio-pci:noiommu now and not support freebsd
"""
def set_up_all(self):
"""
@@ -58,6 +58,10 @@ class TestPortHotPlug(TestCase):
cores = self.dut.get_core_list("1S/4C/1T")
self.coremask = utils.create_mask(cores)
self.port = len(self.dut_ports) - 1
+ if self.drivername == "vfio-pci:noiommu":
+ self.driver_name = "vfio-pci"
+ else:
+ self.driver_name = self.drivername
def set_up(self):
"""
@@ -69,8 +73,8 @@ class TestPortHotPlug(TestCase):
"""
attach port
"""
- # dpdk hotplug discern NIC by pci bus not include domid
- self.dut.send_expect("port attach %s" % self.dut.ports_info[port]['pci'][len("0000:"):],"is attached",60)
+ # dpdk hotplug discern NIC by pci bus and include domid
+ self.dut.send_expect("port attach %s" % self.dut.ports_info[port]['pci'],"is attached",60)
self.dut.send_expect("port start %s" % port,"Configuring Port",120)
# sleep 10 seconds for fortville update link stats
time.sleep(10)
@@ -90,10 +94,10 @@ class TestPortHotPlug(TestCase):
"""
first run testpmd after attach port
"""
- cmd = "./x86_64-native-linuxapp-gcc/app/testpmd -c %s -n %s -- -i" % (self.coremask,self.dut.get_memory_channels())
+ cmd = "./%s/app/testpmd -c %s -n %s -- -i" % (self.target,self.coremask,self.dut.get_memory_channels())
self.dut.send_expect(cmd,"testpmd>",60)
session_secondary = self.dut.new_session()
- session_secondary.send_expect("./usertools/dpdk-devbind.py --bind=igb_uio %s" % self.dut.ports_info[self.port]['pci'], "#", 60)
+ session_secondary.send_expect("./usertools/dpdk-devbind.py --bind=%s %s" % (self.driver_name, self.dut.ports_info[self.port]['pci']), "#", 60)
self.dut.close_session(session_secondary)
self.attach(self.port)
self.dut.send_expect("start","testpmd>",60)
@@ -104,6 +108,7 @@ class TestPortHotPlug(TestCase):
self.dut.send_expect("start","testpmd>",60)
self.dut.send_expect("port detach %s" % self.port,"Please close port first",60)
+ self.dut.send_expect("clear port stats %s" % self.port ,"testpmd>",60)
self.send_packet(self.port)
out = self.dut.send_expect("show port stats %s" % self.port ,"testpmd>",60)
packet = re.search("RX-packets:\s*(\d*)",out)
@@ -127,14 +132,16 @@ class TestPortHotPlug(TestCase):
first attach port after run testpmd
"""
session_secondary = self.dut.new_session()
- session_secondary.send_expect("./usertools/dpdk-devbind.py --bind=igb_uio %s" % self.dut.ports_info[self.port]['pci'], "#", 60)
+ session_secondary.send_expect("./usertools/dpdk-devbind.py --bind=%s %s" % (self.driver_name, self.dut.ports_info[self.port]['pci']), "#", 60)
self.dut.close_session(session_secondary)
- cmd = "./x86_64-native-linuxapp-gcc/app/testpmd -c %s -n %s -- -i" % (self.coremask,self.dut.get_memory_channels())
+ cmd = "./%s/app/testpmd -c %s -n %s -- -i" % (self.target,self.coremask,self.dut.get_memory_channels())
self.dut.send_expect(cmd,"testpmd>",60)
self.detach(self.port)
self.attach(self.port)
+
self.dut.send_expect("start","testpmd>",60)
self.dut.send_expect("port detach %s" % self.port, "Please close port first",60)
+ self.dut.send_expect("clear port stats %s" % self.port ,"testpmd>",60)
self.send_packet(self.port)
out = self.dut.send_expect("show port stats %s" % self.port ,"testpmd>",60)
packet = re.search("RX-packets:\s*(\d*)",out)
@@ -147,8 +154,8 @@ class TestPortHotPlug(TestCase):
"""
Run after each test case.
"""
- self.dut.send_expect("./usertools/dpdk-devbind.py --bind=igb_uio %s" % self.dut.ports_info[self.port]['pci'],"#",60)
self.dut.kill_all()
+ self.dut.send_expect("./usertools/dpdk-devbind.py --bind=%s %s" % (self.driver_name, self.dut.ports_info[self.port]['pci']), "#", 60)
time.sleep(2)
--
2.7.4
next prev parent reply other threads:[~2017-10-18 8:41 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-10-16 10:22 [dts] [PATCH] " Phil Yang
2017-10-17 7:44 ` Jianbo Liu
2017-10-18 8:41 ` Phil Yang [this message]
2017-10-19 10:12 ` [dts] [PATCH v2] " Liu, Yong
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1508316095-16282-1-git-send-email-phil.yang@arm.com \
--to=phil.yang@arm.com \
--cc=Herber.Guan@arm.com \
--cc=Jianbo.Liu@arm.com \
--cc=dts@dpdk.org \
--cc=huilongx.xu@intel.com \
--cc=nd@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).