From: Yu Jiang <yux.jiang@intel.com>
To: dts@dpdk.org
Cc: Yu Jiang <yux.jiang@intel.com>
Subject: [dts] [PATCH V1 2/2] tests/pmdrss_hash: replace legacy filter with rte flow
Date: Tue, 24 Aug 2021 13:16:05 +0800 [thread overview]
Message-ID: <1629782165-17789-3-git-send-email-yux.jiang@intel.com> (raw)
In-Reply-To: <1629782165-17789-1-git-send-email-yux.jiang@intel.com>
According to dpdk commit 81db321da("ethdev: remove legacy HASH filter type support"),
modify pmdrss_hash test plan and script:
1, remove cases dynamic_rss_bond_config&simple_symmetric which are not supported.
2, pmdrss_hash: replace legacy HASH filter with rte_flow.
Signed-off-by: Yu Jiang <yux.jiang@intel.com>
---
conf/test_case_checklist.json | 53 ---------------------
 tests/TestSuite_pmdrss_hash.py | 105 ++++++++---------------------------------
2 files changed, 20 insertions(+), 138 deletions(-)
diff --git a/conf/test_case_checklist.json b/conf/test_case_checklist.json
index 4726aac..02a222b 100644
--- a/conf/test_case_checklist.json
+++ b/conf/test_case_checklist.json
@@ -876,31 +876,6 @@
"Comments": "the nic not support this case"
}
],
- "dynamic_rss_bond_config": [
- {
- "OS": [
- "ALL"
- ],
- "NIC": [
- "powerville",
- "kawela_4",
- "springville",
- "ironpond",
- "springfountain",
- "twinpond",
- "niantic",
- "fortville_spirit_single",
- "fortpark_TLV",
- "fortpark_BASE-T",
- "foxville"
- ],
- "Target": [
- "ALL"
- ],
- "Bug ID": "",
- "Comments": "the nic not support this case,(x722 know ND issue,DPDK-15118)"
- }
- ],
"pmdrss_reta": [
{
"OS": [
@@ -1489,34 +1464,6 @@
"Comments": "nic not support this case"
}
],
- "simple_symmetric": [
- {
- "OS": [
- "ALL"
- ],
- "NIC": [
- "ALL"
- ],
- "Target": [
- "ALL"
- ],
- "Bug ID": "DPDK-8410",
- "Comments": "DPDK-8410, won't fix; skip this case"
- },
- {
- "OS": [
- "ALL"
- ],
- "NIC": [
- "foxville"
- ],
- "Target": [
- "ALL"
- ],
- "Bug ID": "",
- "Comments": "the nic not support this case"
- }
- ],
"toeplitz": [
{
"OS": [
diff --git a/tests/TestSuite_pmdrss_hash.py b/tests/TestSuite_pmdrss_hash.py
index 9ea6f9a..8e1b79b 100644
--- a/tests/TestSuite_pmdrss_hash.py
+++ b/tests/TestSuite_pmdrss_hash.py
@@ -453,6 +453,7 @@ class TestPmdrssHash(TestCase):
dutPorts = self.dut.get_ports(self.nic)
localPort = self.tester.get_local_port(dutPorts[0])
itf = self.tester.get_interface(localPort)
+ rule_action = 'func toeplitz queues end / end'
global reta_num
global iptypes
@@ -471,8 +472,14 @@ class TestPmdrssHash(TestCase):
"set nbcore %d" % (queue + 1), "testpmd> ")
self.dut.send_expect("port stop all", "testpmd> ")
- self.dut.send_expect(
- "set_hash_global_config 0 toeplitz %s enable" % iptype, "testpmd> ")
+ self.dut.send_expect("flow flush 0", "testpmd> ")
+ rule_cmd = f'flow create 0 ingress pattern eth / ipv4 / end actions rss types {iptype} end queues end {rule_action}'
+ if 'sctp' in iptype or 'udp' in iptype or 'tcp' in iptype:
+ rule_cmd = rule_cmd.replace('/ ipv4 /', f'/ ipv4 / {rsstype} /')
+ if 'ipv6' in iptype:
+ rule_cmd = rule_cmd.replace('ipv4', 'ipv6')
+ outx = self.dut.send_expect(rule_cmd, "testpmd> ")
+ self.verify("created" in outx, "Create flow failed")
self.dut.send_expect("port start all", "testpmd> ")
out = self.dut.send_expect(
"port config all rss %s" % rsstype, "testpmd> ")
@@ -516,7 +523,8 @@ class TestPmdrssHash(TestCase):
rule_cmd = rule_cmd.replace('/ ipv4 /', f'/ ipv4 / {rsstype} /')
if 'ipv6' in iptype:
rule_cmd = rule_cmd.replace('ipv4', 'ipv6')
- self.dut.send_expect(rule_cmd, "testpmd> ")
+ outx = self.dut.send_expect(rule_cmd, "testpmd> ")
+ self.verify("created" in outx, "Create flow failed")
self.dut.send_expect("port start all", "testpmd> ")
out = self.dut.send_expect(
"port config all rss %s" % rsstype, "testpmd> ")
@@ -537,6 +545,7 @@ class TestPmdrssHash(TestCase):
dutPorts = self.dut.get_ports(self.nic)
localPort = self.tester.get_local_port(dutPorts[0])
itf = self.tester.get_interface(localPort)
+ rule_action = 'func simple_xor queues end / end'
global reta_num
global iptypes
@@ -557,8 +566,14 @@ class TestPmdrssHash(TestCase):
self.dut.send_expect("port stop all", "testpmd> ")
# some nic not support change hash algorithm
- self.dut.send_expect(
- "set_hash_global_config 0 simple_xor %s enable" % iptype, "testpmd> ")
+ self.dut.send_expect("flow flush 0", "testpmd> ")
+ rule_cmd = f'flow create 0 ingress pattern eth / ipv4 / end actions rss types {iptype} end queues end {rule_action}'
+ if 'sctp' in iptype or 'udp' in iptype or 'tcp' in iptype:
+ rule_cmd = rule_cmd.replace('/ ipv4 /', f'/ ipv4 / {rsstype} /')
+ if 'ipv6' in iptype:
+ rule_cmd = rule_cmd.replace('ipv4', 'ipv6')
+ outx = self.dut.send_expect(rule_cmd, "testpmd> ")
+ self.verify("created" in outx, "Create flow failed")
self.dut.send_expect("port start all", "testpmd> ")
out = self.dut.send_expect(
"port config all rss %s" % rsstype, "testpmd> ")
@@ -572,86 +587,6 @@ class TestPmdrssHash(TestCase):
self.dut.send_expect("quit", "# ", 30)
- def test_simple_symmetric(self):
-
- dutPorts = self.dut.get_ports(self.nic)
- localPort = self.tester.get_local_port(dutPorts[0])
- itf = self.tester.get_interface(localPort)
- global reta_num
- global iptypes
- self.dut.kill_all()
-
- # test with different rss queues
- self.dut.send_expect(
- "%s %s -- -i --rxq=%d --txq=%d" %
- (self.path, self.eal_para, queue, queue), "testpmd> ", 120)
-
- for iptype, rsstype in list(iptypes.items()):
- self.dut.send_expect("set verbose 8", "testpmd> ")
- self.dut.send_expect("set fwd rxonly", "testpmd> ")
- self.dut.send_expect("set promisc all off", "testpmd> ")
- self.dut.send_expect(
- "set nbcore %d" % (queue + 1), "testpmd> ")
-
- self.dut.send_expect("port stop all", "testpmd> ")
- self.dut.send_expect(
- "set_hash_global_config 0 simple_xor %s enable" % iptype, "testpmd> ")
- self.dut.send_expect(
- "set_sym_hash_ena_per_port 0 enable", "testpmd> ")
- self.dut.send_expect("port start all", "testpmd> ")
-
- out = self.dut.send_expect(
- "port config all rss %s" % rsstype, "testpmd> ")
- self.verify("error" not in out, "Configuration of RSS hash failed: Invalid argument")
- # configure the reta with specific mappings.
- for i in range(reta_num):
- reta_entries.insert(i, random.randint(0, queue - 1))
- self.dut.send_expect(
- "port config 0 rss reta (%d,%d)" % (i, reta_entries[i]), "testpmd> ")
- self.send_packet_symmetric(itf, iptype)
-
- self.dut.send_expect("quit", "# ", 30)
-
- def test_dynamic_rss_bond_config(self):
-
- # setup testpmd and finish bond config
- self.verify(self.nic in ["columbiaville_25g", "columbiaville_100g","fortville_eagle", "fortville_spirit",
- "fortpark_TLV","fortpark_BASE-T", "fortville_25g", "carlsville", "foxville"],
- "NIC Unsupported: " + str(self.nic))
-
- self.dut.send_expect("%s %s -- -i" % (self.path, self.eal_para), "testpmd> ", 120)
- self.dut.send_expect("set promisc all off", "testpmd> ")
- out = self.dut.send_expect("create bonded device 3 0", "testpmd> ", 30)
- bond_device_id = int(re.search("port \d+", out).group().split(" ")[-1].strip())
-
- self.dut.send_expect("add bonding slave 0 %d" % bond_device_id, "testpmd>", 30)
- self.dut.send_expect("add bonding slave 1 %d" % bond_device_id, "testpmd>", 30)
-
- # get slave device default rss hash algorithm
- out = self.dut.send_expect("get_hash_global_config 0", "testpmd>")
- slave0_hash_function = re.search("Hash function is .+", out).group().split(" ")[-1].strip()
- out = self.dut.send_expect("get_hash_global_config 1", "testpmd>")
- slave1_hash_function = re.search("Hash function is .+", out).group().split(" ")[-1].strip()
- self.verify(slave0_hash_function == slave1_hash_function, "default hash function not match")
-
- new_hash_function = ""
- for hash_function in ["toeplitz", "simple_xor"]:
- if slave0_hash_function[-3:].lower() != hash_function[-3:]:
- new_hash_function = hash_function
- # update slave 0 rss hash algorithm and get slave 0 and slave 1 rss new hash algorithm
- self.dut.send_expect("set_hash_global_config 0 %s ipv4-other enable" % new_hash_function, "testpmd>")
- out = self.dut.send_expect("get_hash_global_config 0", "testpmd>")
- slave0_new_hash_function = re.search("Hash function is .+", out).group().split(" ")[-1].strip()
- out = self.dut.send_expect("get_hash_global_config 1", "testpmd>")
- slave1_new_hash_function = re.search("Hash function is .+", out).group().split(" ")[-1].strip()
-
- self.verify(slave0_new_hash_function == slave1_new_hash_function, "bond slave auto sync hash function failed")
- self.verify(slave0_new_hash_function[-3:].lower() == new_hash_function[-3:], "changed slave hash function failed")
-
- self.dut.send_expect("remove bonding slave 0 %d" % bond_device_id, "testpmd>", 30)
- self.dut.send_expect("remove bonding slave 1 %d" % bond_device_id, "testpmd>", 30)
- self.dut.send_expect("quit","# ", 30)
-
def tear_down(self):
"""
Run after each test case.
--
2.7.4
next prev parent reply other threads:[~2021-08-24 5:16 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-08-24 5:16 [dts] [PATCH V1 0/2] pmdrss_hash: replace legacy filter with rte_flow Yu Jiang
2021-08-24 5:16 ` [dts] [PATCH V1 1/2] test_plans/pmdrss_hash: replace legacy filter with rte flow Yu Jiang
2021-08-24 5:16 ` Yu Jiang [this message]
2021-08-24 6:22 ` [dts] [PATCH V1 2/2] tests/pmdrss_hash: " Jiang, YuX
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1629782165-17789-3-git-send-email-yux.jiang@intel.com \
--to=yux.jiang@intel.com \
--cc=dts@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).