From: Ke Xu <ke1.xu@intel.com>
To: dts@dpdk.org
Cc: ke1.xu@intel.com, yux.jiang@intel.com, lijuan.tu@intel.com,
qi.fu@intel.com
Subject: [DTS][PATCH V1 3/5] tests/vf_offload: improve vector path validating.
Date: Tue, 7 Feb 2023 11:23:11 +0800 [thread overview]
Message-ID: <20230207032313.404935-4-ke1.xu@intel.com> (raw)
In-Reply-To: <20230207032313.404935-1-ke1.xu@intel.com>
For better deployment in daily regression, we introduce
wrapped cases for each vector path.
Signed-off-by: Ke Xu <ke1.xu@intel.com>
---
tests/TestSuite_vf_offload.py | 152 +++++++++++++++++++++++++++++++++-
1 file changed, 151 insertions(+), 1 deletion(-)
diff --git a/tests/TestSuite_vf_offload.py b/tests/TestSuite_vf_offload.py
index bd412100..93b28afd 100644
--- a/tests/TestSuite_vf_offload.py
+++ b/tests/TestSuite_vf_offload.py
@@ -185,10 +185,18 @@ class TestVfOffload(TestCase):
def launch_testpmd(self, **kwargs):
dcf_flag = kwargs.get("dcf_flag")
+ eal_param = self.eal_para if hasattr(self, "eal_para") else ""
+ eal_param += (
+ " --force-max-simd-bitwidth=%d " % self.specific_bitwidth
+ if hasattr(self, "specific_bitwidth")
and "force-max-simd-bitwidth" not in eal_param
+ else ""
+ )
param = kwargs.get("param") if kwargs.get("param") else ""
if dcf_flag == "enable":
self.vm0_testpmd.start_testpmd(
VM_CORES_MASK,
+ eal_param=eal_param,
param=param,
ports=[self.vf0_guest_pci, self.vf1_guest_pci],
port_options={
@@ -197,7 +205,9 @@ class TestVfOffload(TestCase):
},
)
else:
- self.vm0_testpmd.start_testpmd(VM_CORES_MASK, param=param)
+ self.vm0_testpmd.start_testpmd(
+ VM_CORES_MASK, eal_param=eal_param, param=param
+ )
def checksum_enablehw(self, port, dut):
dut.send_expect("port stop all", "testpmd>")
@@ -812,6 +822,106 @@ class TestVfOffload(TestCase):
self.verify(len(result) == 0, ",".join(list(result.values())))
+ def test_checksum_offload_enable_scalar(self):
+ self.specific_bitwidth = 64
+ self.test_checksum_offload_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_enable_sse(self):
+ self.specific_bitwidth = 128
+ self.test_checksum_offload_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_enable_avx2(self):
+ self.specific_bitwidth = 256
+ self.test_checksum_offload_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_enable_avx512(self):
+ self.specific_bitwidth = 512
+ self.test_checksum_offload_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_vlan_enable_scalar(self):
+ self.specific_bitwidth = 64
+ self.test_checksum_offload_vlan_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_vlan_enable_sse(self):
+ self.specific_bitwidth = 128
+ self.test_checksum_offload_vlan_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_vlan_enable_avx2(self):
+ self.specific_bitwidth = 256
+ self.test_checksum_offload_vlan_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_vlan_enable_avx512(self):
+ self.specific_bitwidth = 512
+ self.test_checksum_offload_vlan_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_tunnel_enable_scalar(self):
+ self.specific_bitwidth = 64
+ self.test_checksum_offload_tunnel_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_tunnel_enable_sse(self):
+ self.specific_bitwidth = 128
+ self.test_checksum_offload_tunnel_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_tunnel_enable_avx2(self):
+ self.specific_bitwidth = 256
+ self.test_checksum_offload_tunnel_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_tunnel_enable_avx512(self):
+ self.specific_bitwidth = 512
+ self.test_checksum_offload_tunnel_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_vlan_tunnel_enable_scalar(self):
+ self.specific_bitwidth = 64
+ self.test_checksum_offload_vlan_tunnel_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_vlan_tunnel_enable_sse(self):
+ self.specific_bitwidth = 128
+ self.test_checksum_offload_vlan_tunnel_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_vlan_tunnel_enable_avx2(self):
+ self.specific_bitwidth = 256
+ self.test_checksum_offload_vlan_tunnel_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_vlan_tunnel_enable_avx512(self):
+ self.specific_bitwidth = 512
+ self.test_checksum_offload_vlan_tunnel_enable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_disable_scalar(self):
+ self.specific_bitwidth = 64
+ self.test_checksum_offload_disable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_disable_sse(self):
+ self.specific_bitwidth = 128
+ self.test_checksum_offload_disable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_disable_avx2(self):
+ self.specific_bitwidth = 256
+ self.test_checksum_offload_disable()
+ del self.specific_bitwidth
+
+ def test_checksum_offload_disable_avx512(self):
+ self.specific_bitwidth = 512
+ self.test_checksum_offload_disable()
+ del self.specific_bitwidth
+
def tcpdump_start_sniffing(self, ifaces=[]):
"""
Start tcpdump in the background to sniff the tester interface where
@@ -1158,6 +1268,46 @@ class TestVfOffload(TestCase):
outer_pkts=pkts_outer,
)
+ def test_tso_scalar(self):
+ self.specific_bitwidth = 64
+ self.test_tso()
+ del self.specific_bitwidth
+
+ def test_tso_sse(self):
+ self.specific_bitwidth = 128
+ self.test_tso()
+ del self.specific_bitwidth
+
+ def test_tso_avx2(self):
+ self.specific_bitwidth = 256
+ self.test_tso()
+ del self.specific_bitwidth
+
+ def test_tso_avx512(self):
+ self.specific_bitwidth = 512
+ self.test_tso()
+ del self.specific_bitwidth
+
+ def test_tso_tunnel_scalar(self):
+ self.specific_bitwidth = 64
+ self.test_tso_tunnel()
+ del self.specific_bitwidth
+
+ def test_tso_tunnel_sse(self):
+ self.specific_bitwidth = 128
+ self.test_tso_tunnel()
+ del self.specific_bitwidth
+
+ def test_tso_tunnel_avx2(self):
+ self.specific_bitwidth = 256
+ self.test_tso_tunnel()
+ del self.specific_bitwidth
+
+ def test_tso_tunnel_avx512(self):
+ self.specific_bitwidth = 512
+ self.test_tso_tunnel()
+ del self.specific_bitwidth
+
def tear_down(self):
self.vm0_testpmd.execute_cmd("quit", "# ")
self.dut.send_expect(
--
2.25.1
next prev parent reply other threads:[~2023-02-07 3:25 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-07 3:23 [DTS][PATCH V1 0/5] Update vf_offload cases for DPDK-v23.03 validation Ke Xu
2023-02-07 3:23 ` [DTS][PATCH V1 1/5] tests/vf_offload: add VLAN packets to test scope Ke Xu
2023-02-07 3:23 ` [DTS][PATCH V1 2/5] tests/vf_offload: improve TSO validating Ke Xu
2023-02-07 3:23 ` Ke Xu [this message]
2023-02-07 3:23 ` [DTS][PATCH V1 4/5] tests/vf_offload: fix error when no packet captured Ke Xu
2023-02-07 3:23 ` [DTS][PATCH V1 5/5] test_plans/vf_offload: add VLAN packets to test scope and improve vector path validating Ke Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230207032313.404935-4-ke1.xu@intel.com \
--to=ke1.xu@intel.com \
--cc=dts@dpdk.org \
--cc=lijuan.tu@intel.com \
--cc=qi.fu@intel.com \
--cc=yux.jiang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).