* [dts] [PATCH v1] tests/large_vf:support cvl 25G
@ 2021-01-14 3:08 Xu Hailin
2021-01-14 5:21 ` Zhao, HaiyangX
2021-01-19 6:32 ` Tu, Lijuan
0 siblings, 2 replies; 7+ messages in thread
From: Xu Hailin @ 2021-01-14 3:08 UTC (permalink / raw)
To: dts; +Cc: Hailin Xu
From: Hailin Xu <hailinx.xu@intel.com>
Add a judgment condition for the CVL 25G NIC, because 25G differs from 100G:
1. The maximum number of VFs is different.
2. The number of VFs that can use 256 queues is different.
3. Remove hard-coded values.
Signed-off-by: Hailin Xu <hailinx.xu@intel.com>
---
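For reference, a minimal standalone sketch of the detection this patch
automates (plain Python outside the DTS framework; the PCI address below is
a hypothetical placeholder):

def get_max_vf_num(pf_pci):
    # sriov_totalvfs is the standard Linux SR-IOV sysfs attribute holding
    # the maximum number of VFs the PF supports; the attached test log
    # shows 64 for the 25G card under test.
    with open("/sys/bus/pci/devices/{}/sriov_totalvfs".format(pf_pci)) as f:
        return int(f.read().strip())

max_vf_num = get_max_vf_num("0000:18:00.0")  # hypothetical PF address
# Mirrors the patch: with more than 128 VFs available, the 256-queue cases
# drive seven VFs; otherwise only three.
vf_num = 7 if max_vf_num > 128 else 3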
tests/TestSuite_large_vf.py | 120 ++++++++++++++++++++++--------------
1 file changed, 73 insertions(+), 47 deletions(-)
diff --git a/tests/TestSuite_large_vf.py b/tests/TestSuite_large_vf.py
index 77c9fc62..0cba1cdd 100755
--- a/tests/TestSuite_large_vf.py
+++ b/tests/TestSuite_large_vf.py
@@ -172,19 +172,18 @@ multi_fdir_among = {
"count": 1000
}
-more_than_4_queues_128_vfs = {
- "name": "test_more_than_4_queues_128_vfs",
+more_than_4_queues_max_vfs = {
+ "name": "test_more_than_4_queues_max_vfs",
"param": ["--txq=8 --rxq=8", "--txq=4 --rxq=4"],
"check_param": "configure queues failed"
}
-more_than_128_vfs_4_queues = {
- "name": "test_more_than_128_vfs_4_queues",
- "vf_num": [128, 129],
+more_than_max_vfs_4_queues = {
+ "name": "test_more_than_max_vfs_4_queues",
"check_param": "-bash: echo: write error: Numerical result out of range"
}
-max_vfs_4_queues_128 = [multi_fdir_among, more_than_4_queues_128_vfs, more_than_128_vfs_4_queues]
+max_vfs_4_queues_128 = [multi_fdir_among, more_than_4_queues_max_vfs, more_than_max_vfs_4_queues]
class TestLargeVf(TestCase):
@@ -203,6 +202,7 @@ class TestLargeVf(TestCase):
self.used_dut_port = self.dut_ports[0]
self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]['intf']
self.pf0_pci = self.dut.ports_info[self.dut_ports[0]]['pci']
+ self.max_vf_num = int(self.dut.send_expect('cat /sys/bus/pci/devices/%s/sriov_totalvfs' % self.pf0_pci, '#'))
self.pf0_mac = self.dut.get_mac_address(0)
self.vf_flag = False
@@ -213,6 +213,9 @@ class TestLargeVf(TestCase):
self.pkt = Packet()
self.pmd_output = PmdOutput(self.dut)
+ self.app_path = self.dut.apps_name["test-pmd"]
+ self.vf_num = 7 if self.max_vf_num > 128 else 3
+
def set_up(self):
"""
Run before each test case.
@@ -241,11 +244,20 @@ class TestLargeVf(TestCase):
self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
self.vf_flag = False
- def launch_testpmd(self, param, total=False):
+ def launch_testpmd(self, param, total=False, retry_times=3):
if total:
param = param + " --total-num-mbufs=500000"
- self.pmd_output.start_testpmd("all", param=param,
- ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket)
+ while retry_times:
+ try:
+ self.pmd_output.start_testpmd("all", param=param,
+ ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket)
+ break
+ except Exception as e:
+ self.logger.info('start testpmd occurred exception: {}'.format(e))
+ retry_times = retry_times - 1
+ time.sleep(1)
+ self.logger.info('try start testpmd the {} times'.format(retry_times))
+
def config_testpmd(self):
self.pmd_output.execute_cmd("set verbose 1")
@@ -289,55 +301,64 @@ class TestLargeVf(TestCase):
self.create_fdir_rule(vectors[0]["rule"])
self.check_match_mismatch_pkts(vectors[0])
elif subcase_name == "test_pf_large_vf_fdir_coexist":
- pmdout = PmdOutput(self.dut, self.session_list[0])
- self.create_pf_rule(pmdout, self.pf0_intf, tv["param"][0], tv["param"][1])
- self.send_pkts_pf_check(pmdout, self.pf0_intf, self.pf0_mac, tv["param"][0], tv["check_param"], tv["count"])
+ self.create_pf_rule(self.pmdout_list[0], self.pf0_intf, tv["param"][0], tv["param"][1])
+ self.send_pkts_pf_check(self.pmdout_list[0], self.pf0_intf, self.pf0_mac, tv["param"][0], tv["check_param"], tv["count"])
self.create_fdir_rule(vectors[0]["rule"])
self.check_match_mismatch_pkts(vectors[0])
- self.destroy_pf_rule(pmdout, self.pf0_intf)
+ self.destroy_pf_rule(self.pmdout_list[0], self.pf0_intf)
elif subcase_name == "test_exceed_256_queues":
self.pmd_output.execute_cmd("quit", "#")
- eal_param = "-w {} --file-prefix=port0vf0 -- -i ".format(self.sriov_vfs_port[0].pci)
- cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4 -n 4 " + eal_param + tv["param"][0]
+ eal_param = self.dut.create_eal_parameters(prefix='port0vf0', ports=[self.sriov_vfs_port[0].pci])
+ cmd = "{} ".format(self.app_path) + eal_param + "-- -i " + tv["param"][0]
out = self.pmd_output.execute_cmd(cmd, "# ")
self.verify(tv["check_param"] in out, "fail: testpmd start successfully")
- self.pmd_output.execute_cmd("quit", "#")
self.launch_testpmd(tv["param"][1])
self.check_rxqtxq_number(512, tv["check_param"])
elif subcase_name == "test_more_than_3_vfs_256_queues":
self.pmd_output.execute_cmd("quit", "#")
- self.destroy_iavf()
- self.create_iavf(4)
- # start 4 testpmd uss 256 queues
- for i in range(4):
- if i < 3:
- eal_param = "-w {} --file-prefix=port0vf{} -- -i ".format(self.sriov_vfs_port[i].pci, i)
- cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4 -n 4 " + eal_param + tv["param"]
- self.session_list[i].send_expect(cmd, "testpmd> ")
- else:
- # start fourth testpmd failed
- eal_param = "-w {} --file-prefix=port0vf3 -- -i ".format(self.sriov_vfs_port[3].pci)
- cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4 -n 4 " + eal_param + tv[
- "param"]
- out = self.dut.send_command(cmd)
+ # start testpmd uss 256 queues
+ for i in range(self.vf_num + 1):
+ if self.max_vf_num == 64:
+ self.pmdout_list[0].start_testpmd(param=tv["param"], ports=[self.sriov_vfs_port[0].pci], prefix='port0vf0')
+ eal_param = self.dut.create_eal_parameters(fixed_prefix=True, ports=[self.sriov_vfs_port[1].pci])
+ cmd = "{} ".format(self.app_path) + eal_param + "-- -i " + tv["param"]
+ out = self.pmd_output.execute_cmd(cmd, "#")
self.verify(tv["check_param"] in out, "fail: testpmd start successfully")
- # quit all testpmd
- self.session_list[0].send_expect("quit", "# ")
- self.session_list[1].send_expect("quit", "# ")
- self.session_list[2].send_expect("quit", "# ")
+ self.pmdout_list[0].execute_cmd("quit", "# ")
+ break
+ else:
+ if i < self.vf_num:
+ self.pmdout_list[i].start_testpmd(param=tv["param"], ports=[self.sriov_vfs_port[i].pci],
+ prefix='port0vf{}'.format(i))
+ else:
+ # start fourth testpmd failed
+ eal_param = self.dut.create_eal_parameters(fixed_prefix=True, ports=[self.sriov_vfs_port[-1].pci])
+ cmd = "{} ".format(self.app_path) + eal_param + "-- -i " + tv["param"]
+ out = self.pmd_output.execute_cmd(cmd, "#")
+ self.verify(tv["check_param"] in out, "fail: testpmd start successfully")
+ # quit all testpmd
+ self.pmdout_list[0].execute_cmd("quit", "# ")
+ self.pmdout_list[1].execute_cmd("quit", "# ")
+ self.pmdout_list[2].execute_cmd("quit", "# ")
+ if self.vf_num > 3:
+ self.pmdout_list[3].execute_cmd("quit", "# ")
+ self.pmdout_list[4].execute_cmd("quit", "# ")
+ self.pmdout_list[5].execute_cmd("quit", "# ")
+ self.pmdout_list[6].execute_cmd("quit", "# ")
+
# case 2: 128_vfs_4_queues
elif subcase_name == "test_multi_fdir_among":
self.create_fdir_rule(tv["rule"])
self.check_match_mismatch_pkts(tv)
- elif subcase_name == "test_more_than_128_vfs_4_queues":
+ elif subcase_name == "test_more_than_max_vfs_4_queues":
self.pmd_output.execute_cmd("quit", "#")
out = self.dut.send_expect("echo {} > /sys/bus/pci/devices/{}/sriov_numvfs".format(
- tv["vf_num"][0], self.pf0_pci), "# ")
- self.verify(tv["check_param"] not in out, "fail: create vfs successfully")
+ self.max_vf_num, self.pf0_pci), "# ")
+ self.verify(tv["check_param"] not in out, "fail: create vfs failed")
out = self.dut.send_expect("echo {} > /sys/bus/pci/devices/{}/sriov_numvfs".format(
- tv["vf_num"][1], self.pf0_pci), "# ")
+ self.max_vf_num + 1, self.pf0_pci), "# ")
self.verify(tv["check_param"] in out, "fail: create vfs successfully")
- elif subcase_name == "test_more_than_4_queues_128_vfs":
+ elif subcase_name == "test_more_than_4_queues_max_vfs":
self.pmd_output.execute_cmd("quit", "# ")
out = self.pmd_output.start_testpmd("all", param=tv["param"][0],
ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket)
@@ -407,6 +428,7 @@ class TestLargeVf(TestCase):
packet = "Ether(dst='{}')/IP(src=RandIP(),dst='192.168.0.{}')/UDP(sport=22,dport=23)/Raw('x'*80)".format(pf_mac, ip)
self.send_packets(packet, 1)
ip += 1
+ time.sleep(1)
out = pmdout.execute_cmd("ethtool -S %s" % pf_intf, "# ")
for queue in range(check_param[0], check_param[1]+1):
packet_str = "rx_queue_%d_packets: (\d+)" % queue
@@ -503,18 +525,21 @@ class TestLargeVf(TestCase):
self.check_txonly_pkts(rxtx_num)
def test_3_vfs_256_queues(self):
- self.session_list = []
- for i in range(3):
- name = self.dut.new_session()
- self.session_list.append(name)
- self.create_iavf(3)
+ self.pmdout_list = []
+ session_list = []
+ for i in range(self.vf_num):
+ session = self.dut.new_session()
+ session_list.append(session)
+ pmdout = PmdOutput(self.dut, session)
+ self.pmdout_list.append(pmdout)
+ self.create_iavf(self.vf_num + 1)
self.launch_testpmd("--rxq=256 --txq=256", total=True)
self.config_testpmd()
self.rte_flow_process(max_vfs_256_queues_3)
- self.dut.close_session(self.session_list)
+ self.dut.close_session(session_list)
- def test_128_vfs_4_queues(self):
- self.create_iavf(128)
+ def test_max_vfs_4_queues(self):
+ self.create_iavf(self.max_vf_num)
self.launch_testpmd("--rxq=4 --txq=4")
self.config_testpmd()
self.rte_flow_process(max_vfs_4_queues_128)
@@ -524,6 +549,7 @@ class TestLargeVf(TestCase):
Run after each test case.
"""
self.pmd_output.execute_cmd("quit", "#")
+ self.dut.kill_all()
self.destroy_iavf()
--
2.17.1
* Re: [dts] [PATCH v1] tests/large_vf:support cvl 25G
2021-01-14 3:08 [dts] [PATCH v1] tests/large_vf:support cvl 25G Xu Hailin
@ 2021-01-14 5:21 ` Zhao, HaiyangX
2021-01-19 6:32 ` Tu, Lijuan
1 sibling, 0 replies; 7+ messages in thread
From: Zhao, HaiyangX @ 2021-01-14 5:21 UTC (permalink / raw)
To: Xu, HailinX, dts; +Cc: Xu, HailinX
Acked-by: Haiyang Zhao <haiyangx.zhao@intel.com>
Best Regards,
Zhao Haiyang
> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xu Hailin
> Sent: Thursday, January 14, 2021 11:09
> To: dts@dpdk.org
> Cc: Xu, HailinX <hailinx.xu@intel.com>
> Subject: [dts] [PATCH v1] tests/large_vf:support cvl 25G
* Re: [dts] [PATCH v1] tests/large_vf:support cvl 25G
2021-01-14 3:08 [dts] [PATCH v1] tests/large_vf:support cvl 25G Xu Hailin
2021-01-14 5:21 ` Zhao, HaiyangX
@ 2021-01-19 6:32 ` Tu, Lijuan
1 sibling, 0 replies; 7+ messages in thread
From: Tu, Lijuan @ 2021-01-19 6:32 UTC (permalink / raw)
To: Xu, HailinX, dts; +Cc: Xu, HailinX
It's great to see the max VF number fetched from the system API rather than hard-coded.
Could you please update the notes too?
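The limit is enforced by the kernel: writing a VF count above sriov_totalvfs
to sriov_numvfs fails with ERANGE, which bash reports as the "Numerical
result out of range" string the test checks for. A minimal sketch of that
probe, assuming a hypothetical PF address and that no VFs currently exist
(a nonzero write while VFs exist fails with EBUSY instead):

import errno

def try_set_numvfs(pf_pci, n):
    # Succeeds for 0 <= n <= sriov_totalvfs; larger values are rejected
    # by the kernel with ERANGE.
    try:
        with open("/sys/bus/pci/devices/{}/sriov_numvfs".format(pf_pci), "w") as f:
            f.write(str(n))
        return True
    except OSError as e:
        if e.errno == errno.ERANGE:
            return False
        raise

assert try_set_numvfs("0000:18:00.0", 64)      # max reported by the 25G card
assert not try_set_numvfs("0000:18:00.0", 65)  # one past the limit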
> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xu Hailin
> Sent: Thursday, January 14, 2021 11:09
> To: dts@dpdk.org
> Cc: Xu, HailinX <hailinx.xu@intel.com>
> Subject: [dts] [PATCH v1] tests/large_vf:support cvl 25G
>
> From: Hailin Xu <hailinx.xu@intel.com>
>
> Add a judgment condition for the CVL 25G NIC, because 25G differs from
> 100G:
> 1. The maximum number of VFs is different.
> 2. The number of VFs that can use 256 queues is different.
> 3. Remove hard-coded values.
>
> Signed-off-by: Hailin Xu <hailinx.xu@intel.com>
> ---
> tests/TestSuite_large_vf.py | 120 ++++++++++++++++++++++--------------
> 1 file changed, 73 insertions(+), 47 deletions(-)
>
> diff --git a/tests/TestSuite_large_vf.py b/tests/TestSuite_large_vf.py index
> 77c9fc62..0cba1cdd 100755
> --- a/tests/TestSuite_large_vf.py
> +++ b/tests/TestSuite_large_vf.py
> @@ -172,19 +172,18 @@ multi_fdir_among = {
> "count": 1000
> }
>
> -more_than_4_queues_128_vfs = {
> - "name": "test_more_than_4_queues_128_vfs",
> +more_than_4_queues_max_vfs = {
> + "name": "test_more_than_4_queues_max_vfs",
> "param": ["--txq=8 --rxq=8", "--txq=4 --rxq=4"],
> "check_param": "configure queues failed"
> }
>
> -more_than_128_vfs_4_queues = {
> - "name": "test_more_than_128_vfs_4_queues",
> - "vf_num": [128, 129],
> +more_than_max_vfs_4_queues = {
> + "name": "test_more_than_max_vfs_4_queues",
> "check_param": "-bash: echo: write error: Numerical result out of range"
> }
>
> -max_vfs_4_queues_128 = [multi_fdir_among,
> more_than_4_queues_128_vfs, more_than_128_vfs_4_queues]
> +max_vfs_4_queues_128 = [multi_fdir_among,
> more_than_4_queues_max_vfs,
> +more_than_max_vfs_4_queues]
>
>
> class TestLargeVf(TestCase):
> @@ -203,6 +202,7 @@ class TestLargeVf(TestCase):
> self.used_dut_port = self.dut_ports[0]
> self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]['intf']
> self.pf0_pci = self.dut.ports_info[self.dut_ports[0]]['pci']
> + self.max_vf_num = int(self.dut.send_expect('cat
> + /sys/bus/pci/devices/%s/sriov_totalvfs' % self.pf0_pci, '#'))
> self.pf0_mac = self.dut.get_mac_address(0)
>
> self.vf_flag = False
> @@ -213,6 +213,9 @@ class TestLargeVf(TestCase):
> self.pkt = Packet()
> self.pmd_output = PmdOutput(self.dut)
>
> + self.app_path = self.dut.apps_name["test-pmd"]
> + self.vf_num = 7 if self.max_vf_num > 128 else 3
> +
> def set_up(self):
> """
> Run before each test case.
> @@ -241,11 +244,20 @@ class TestLargeVf(TestCase):
> self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
> self.vf_flag = False
>
> - def launch_testpmd(self, param, total=False):
> + def launch_testpmd(self, param, total=False, retry_times=3):
> if total:
> param = param + " --total-num-mbufs=500000"
> - self.pmd_output.start_testpmd("all", param=param,
> - ports=[self.sriov_vfs_port[0].pci],
> socket=self.ports_socket)
> + while retry_times:
> + try:
> + self.pmd_output.start_testpmd("all", param=param,
> + ports=[self.sriov_vfs_port[0].pci],
> socket=self.ports_socket)
> + break
> + except Exception as e:
> + self.logger.info('start testpmd occurred exception: {}'.format(e))
> + retry_times = retry_times - 1
> + time.sleep(1)
> + self.logger.info('try start testpmd the {}
> + times'.format(retry_times))
> +
>
> def config_testpmd(self):
> self.pmd_output.execute_cmd("set verbose 1") @@ -289,55 +301,64
> @@ class TestLargeVf(TestCase):
> self.create_fdir_rule(vectors[0]["rule"])
> self.check_match_mismatch_pkts(vectors[0])
> elif subcase_name == "test_pf_large_vf_fdir_coexist":
> - pmdout = PmdOutput(self.dut, self.session_list[0])
> - self.create_pf_rule(pmdout, self.pf0_intf, tv["param"][0],
> tv["param"][1])
> - self.send_pkts_pf_check(pmdout, self.pf0_intf, self.pf0_mac,
> tv["param"][0], tv["check_param"], tv["count"])
> + self.create_pf_rule(self.pmdout_list[0], self.pf0_intf,
> tv["param"][0], tv["param"][1])
> + self.send_pkts_pf_check(self.pmdout_list[0],
> + self.pf0_intf, self.pf0_mac, tv["param"][0], tv["check_param"],
> + tv["count"])
> self.create_fdir_rule(vectors[0]["rule"])
> self.check_match_mismatch_pkts(vectors[0])
> - self.destroy_pf_rule(pmdout, self.pf0_intf)
> + self.destroy_pf_rule(self.pmdout_list[0],
> + self.pf0_intf)
> elif subcase_name == "test_exceed_256_queues":
> self.pmd_output.execute_cmd("quit", "#")
> - eal_param = "-w {} --file-prefix=port0vf0 -- -i
> ".format(self.sriov_vfs_port[0].pci)
> - cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4
> -n 4 " + eal_param + tv["param"][0]
> + eal_param = self.dut.create_eal_parameters(prefix='port0vf0',
> ports=[self.sriov_vfs_port[0].pci])
> + cmd = "{} ".format(self.app_path) + eal_param + "--
> + -i " + tv["param"][0]
> out = self.pmd_output.execute_cmd(cmd, "# ")
> self.verify(tv["check_param"] in out, "fail: testpmd start
> successfully")
> - self.pmd_output.execute_cmd("quit", "#")
> self.launch_testpmd(tv["param"][1])
> self.check_rxqtxq_number(512, tv["check_param"])
> elif subcase_name == "test_more_than_3_vfs_256_queues":
> self.pmd_output.execute_cmd("quit", "#")
> - self.destroy_iavf()
> - self.create_iavf(4)
> - # start 4 testpmd uss 256 queues
Spelling error.
> - for i in range(4):
> - if i < 3:
> - eal_param = "-w {} --file-prefix=port0vf{} -- -i
> ".format(self.sriov_vfs_port[i].pci, i)
> - cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l
> 1,2,3,4 -n 4 " + eal_param + tv["param"]
> - self.session_list[i].send_expect(cmd, "testpmd> ")
> - else:
> - # start fourth testpmd failed
> - eal_param = "-w {} --file-prefix=port0vf3 -- -i
> ".format(self.sriov_vfs_port[3].pci)
> - cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l
> 1,2,3,4 -n 4 " + eal_param + tv[
> - "param"]
> - out = self.dut.send_command(cmd)
> + # start testpmd uss 256 queues
> + for i in range(self.vf_num + 1):
> + if self.max_vf_num == 64:
> + self.pmdout_list[0].start_testpmd(param=tv["param"],
> ports=[self.sriov_vfs_port[0].pci], prefix='port0vf0')
> + eal_param =
> self.dut.create_eal_parameters(fixed_prefix=True,
> ports=[self.sriov_vfs_port[1].pci])
> + cmd = "{} ".format(self.app_path) + eal_param + "-- -i " +
> tv["param"]
> + out = self.pmd_output.execute_cmd(cmd, "#")
> self.verify(tv["check_param"] in out, "fail: testpmd start
> successfully")
> - # quit all testpmd
> - self.session_list[0].send_expect("quit", "# ")
> - self.session_list[1].send_expect("quit", "# ")
> - self.session_list[2].send_expect("quit", "# ")
> + self.pmdout_list[0].execute_cmd("quit", "# ")
> + break
> + else:
> + if i < self.vf_num:
> + self.pmdout_list[i].start_testpmd(param=tv["param"],
> ports=[self.sriov_vfs_port[i].pci],
> + prefix='port0vf{}'.format(i))
> + else:
> + # start fourth testpmd failed
> + eal_param =
> self.dut.create_eal_parameters(fixed_prefix=True,
> ports=[self.sriov_vfs_port[-1].pci])
> + cmd = "{} ".format(self.app_path) + eal_param + "-- -i " +
> tv["param"]
> + out = self.pmd_output.execute_cmd(cmd, "#")
> + self.verify(tv["check_param"] in out, "fail: testpmd start
> successfully")
> + # quit all testpmd
> + self.pmdout_list[0].execute_cmd("quit", "# ")
> + self.pmdout_list[1].execute_cmd("quit", "# ")
> + self.pmdout_list[2].execute_cmd("quit", "# ")
> + if self.vf_num > 3:
> + self.pmdout_list[3].execute_cmd("quit", "# ")
> + self.pmdout_list[4].execute_cmd("quit", "# ")
> + self.pmdout_list[5].execute_cmd("quit", "# ")
> +
> + self.pmdout_list[6].execute_cmd("quit", "# ")
> +
> # case 2: 128_vfs_4_queues
128_vfs_4_queues should be updated too.
> elif subcase_name == "test_multi_fdir_among":
> self.create_fdir_rule(tv["rule"])
> self.check_match_mismatch_pkts(tv)
> - elif subcase_name == "test_more_than_128_vfs_4_queues":
> + elif subcase_name == "test_more_than_max_vfs_4_queues":
> self.pmd_output.execute_cmd("quit", "#")
> out = self.dut.send_expect("echo {} >
> /sys/bus/pci/devices/{}/sriov_numvfs".format(
> - tv["vf_num"][0], self.pf0_pci), "# ")
> - self.verify(tv["check_param"] not in out, "fail: create vfs
> successfully")
> + self.max_vf_num, self.pf0_pci), "# ")
> + self.verify(tv["check_param"] not in out, "fail:
> + create vfs failed")
> out = self.dut.send_expect("echo {} >
> /sys/bus/pci/devices/{}/sriov_numvfs".format(
> - tv["vf_num"][1], self.pf0_pci), "# ")
> + self.max_vf_num + 1, self.pf0_pci), "# ")
> self.verify(tv["check_param"] in out, "fail: create vfs successfully")
> - elif subcase_name == "test_more_than_4_queues_128_vfs":
> + elif subcase_name == "test_more_than_4_queues_max_vfs":
> self.pmd_output.execute_cmd("quit", "# ")
> out = self.pmd_output.start_testpmd("all",
> param=tv["param"][0],
> ports=[self.sriov_vfs_port[0].pci],
> socket=self.ports_socket) @@ -407,6 +428,7 @@ class TestLargeVf(TestCase):
> packet =
> "Ether(dst='{}')/IP(src=RandIP(),dst='192.168.0.{}')/UDP(sport=22,dport=23)/Ra
> w('x'*80)".format(pf_mac, ip)
> self.send_packets(packet, 1)
> ip += 1
> + time.sleep(1)
> out = pmdout.execute_cmd("ethtool -S %s" % pf_intf, "# ")
> for queue in range(check_param[0], check_param[1]+1):
> packet_str = "rx_queue_%d_packets: (\d+)" % queue @@ -503,18
> +525,21 @@ class TestLargeVf(TestCase):
> self.check_txonly_pkts(rxtx_num)
>
> def test_3_vfs_256_queues(self):
> - self.session_list = []
> - for i in range(3):
> - name = self.dut.new_session()
> - self.session_list.append(name)
> - self.create_iavf(3)
> + self.pmdout_list = []
> + session_list = []
> + for i in range(self.vf_num):
> + session = self.dut.new_session()
> + session_list.append(session)
> + pmdout = PmdOutput(self.dut, session)
> + self.pmdout_list.append(pmdout)
> + self.create_iavf(self.vf_num + 1)
> self.launch_testpmd("--rxq=256 --txq=256", total=True)
> self.config_testpmd()
> self.rte_flow_process(max_vfs_256_queues_3)
> - self.dut.close_session(self.session_list)
> + self.dut.close_session(session_list)
>
> - def test_128_vfs_4_queues(self):
> - self.create_iavf(128)
> + def test_max_vfs_4_queues(self):
> + self.create_iavf(self.max_vf_num)
> self.launch_testpmd("--rxq=4 --txq=4")
> self.config_testpmd()
> self.rte_flow_process(max_vfs_4_queues_128)
> @@ -524,6 +549,7 @@ class TestLargeVf(TestCase):
> Run after each test case.
> """
> self.pmd_output.execute_cmd("quit", "#")
> + self.dut.kill_all()
> self.destroy_iavf()
>
>
> --
> 2.17.1
* [dts] [PATCH v1] tests/large_vf:support cvl 25G
@ 2021-01-28 7:50 Hailin Xu
0 siblings, 0 replies; 7+ messages in thread
From: Hailin Xu @ 2021-01-28 7:50 UTC (permalink / raw)
To: dts; +Cc: Hailin Xu
Add a judgment condition for the CVL 25G NIC, because 25G differs from 100G:
1. The maximum number of VFs is different.
2. The number of VFs that can use 256 queues is different.
3. Remove hard-coded values.
---
tests/TestSuite_large_vf.py | 135 +++++++++++++++++++++---------------
1 file changed, 81 insertions(+), 54 deletions(-)
diff --git a/tests/TestSuite_large_vf.py b/tests/TestSuite_large_vf.py
index 77c9fc62..6ff67a22 100755
--- a/tests/TestSuite_large_vf.py
+++ b/tests/TestSuite_large_vf.py
@@ -172,19 +172,18 @@ multi_fdir_among = {
"count": 1000
}
-more_than_4_queues_128_vfs = {
- "name": "test_more_than_4_queues_128_vfs",
+more_than_4_queues_max_vfs = {
+ "name": "test_more_than_4_queues_max_vfs",
"param": ["--txq=8 --rxq=8", "--txq=4 --rxq=4"],
"check_param": "configure queues failed"
}
-more_than_128_vfs_4_queues = {
- "name": "test_more_than_128_vfs_4_queues",
- "vf_num": [128, 129],
+more_than_max_vfs_4_queues = {
+ "name": "test_more_than_max_vfs_4_queues",
"check_param": "-bash: echo: write error: Numerical result out of range"
}
-max_vfs_4_queues_128 = [multi_fdir_among, more_than_4_queues_128_vfs, more_than_128_vfs_4_queues]
+max_vfs_4_queues = [multi_fdir_among, more_than_4_queues_max_vfs, more_than_max_vfs_4_queues]
class TestLargeVf(TestCase):
@@ -203,6 +202,7 @@ class TestLargeVf(TestCase):
self.used_dut_port = self.dut_ports[0]
self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]['intf']
self.pf0_pci = self.dut.ports_info[self.dut_ports[0]]['pci']
+ self.max_vf_num = int(self.dut.send_expect('cat /sys/bus/pci/devices/%s/sriov_totalvfs' % self.pf0_pci, '#'))
self.pf0_mac = self.dut.get_mac_address(0)
self.vf_flag = False
@@ -213,6 +213,9 @@ class TestLargeVf(TestCase):
self.pkt = Packet()
self.pmd_output = PmdOutput(self.dut)
+ self.app_path = self.dut.apps_name["test-pmd"]
+ self.vf_num = 7 if self.max_vf_num > 128 else 3
+
def set_up(self):
"""
Run before each test case.
@@ -241,11 +244,20 @@ class TestLargeVf(TestCase):
self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
self.vf_flag = False
- def launch_testpmd(self, param, total=False):
+ def launch_testpmd(self, param, total=False, retry_times=3):
if total:
param = param + " --total-num-mbufs=500000"
- self.pmd_output.start_testpmd("all", param=param,
- ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket)
+ while retry_times:
+ try:
+ self.pmd_output.start_testpmd("all", param=param,
+ ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket)
+ break
+ except Exception as e:
+ self.logger.info('start testpmd occurred exception: {}'.format(e))
+ retry_times = retry_times - 1
+ time.sleep(1)
+ self.logger.info('try start testpmd the {} times'.format(retry_times))
+
def config_testpmd(self):
self.pmd_output.execute_cmd("set verbose 1")
@@ -289,55 +301,65 @@ class TestLargeVf(TestCase):
self.create_fdir_rule(vectors[0]["rule"])
self.check_match_mismatch_pkts(vectors[0])
elif subcase_name == "test_pf_large_vf_fdir_coexist":
- pmdout = PmdOutput(self.dut, self.session_list[0])
- self.create_pf_rule(pmdout, self.pf0_intf, tv["param"][0], tv["param"][1])
- self.send_pkts_pf_check(pmdout, self.pf0_intf, self.pf0_mac, tv["param"][0], tv["check_param"], tv["count"])
+ self.destroy_pf_rule(self.pmdout_list[0], self.pf0_intf)
+ self.create_pf_rule(self.pmdout_list[0], self.pf0_intf, tv["param"][0], tv["param"][1])
+ self.send_pkts_pf_check(self.pmdout_list[0], self.pf0_intf, self.pf0_mac, tv["param"][0], tv["check_param"], tv["count"])
self.create_fdir_rule(vectors[0]["rule"])
self.check_match_mismatch_pkts(vectors[0])
- self.destroy_pf_rule(pmdout, self.pf0_intf)
+ self.destroy_pf_rule(self.pmdout_list[0], self.pf0_intf)
elif subcase_name == "test_exceed_256_queues":
self.pmd_output.execute_cmd("quit", "#")
- eal_param = "-w {} --file-prefix=port0vf0 -- -i ".format(self.sriov_vfs_port[0].pci)
- cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4 -n 4 " + eal_param + tv["param"][0]
+ eal_param = self.dut.create_eal_parameters(prefix='port0vf0', ports=[self.sriov_vfs_port[0].pci])
+ cmd = "{} ".format(self.app_path) + eal_param + "-- -i " + tv["param"][0]
out = self.pmd_output.execute_cmd(cmd, "# ")
self.verify(tv["check_param"] in out, "fail: testpmd start successfully")
- self.pmd_output.execute_cmd("quit", "#")
self.launch_testpmd(tv["param"][1])
self.check_rxqtxq_number(512, tv["check_param"])
elif subcase_name == "test_more_than_3_vfs_256_queues":
self.pmd_output.execute_cmd("quit", "#")
- self.destroy_iavf()
- self.create_iavf(4)
- # start 4 testpmd uss 256 queues
- for i in range(4):
- if i < 3:
- eal_param = "-w {} --file-prefix=port0vf{} -- -i ".format(self.sriov_vfs_port[i].pci, i)
- cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4 -n 4 " + eal_param + tv["param"]
- self.session_list[i].send_expect(cmd, "testpmd> ")
- else:
- # start fourth testpmd failed
- eal_param = "-w {} --file-prefix=port0vf3 -- -i ".format(self.sriov_vfs_port[3].pci)
- cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4 -n 4 " + eal_param + tv[
- "param"]
- out = self.dut.send_command(cmd)
+ # start testpmd use 256 queues
+ for i in range(self.vf_num + 1):
+ if self.max_vf_num == 64:
+ self.pmdout_list[0].start_testpmd(param=tv["param"], ports=[self.sriov_vfs_port[0].pci], prefix='port0vf0')
+ eal_param = self.dut.create_eal_parameters(fixed_prefix=True, ports=[self.sriov_vfs_port[1].pci])
+ cmd = "{} ".format(self.app_path) + eal_param + "-- -i " + tv["param"]
+ out = self.pmd_output.execute_cmd(cmd, "#")
self.verify(tv["check_param"] in out, "fail: testpmd start successfully")
- # quit all testpmd
- self.session_list[0].send_expect("quit", "# ")
- self.session_list[1].send_expect("quit", "# ")
- self.session_list[2].send_expect("quit", "# ")
- # case 2: 128_vfs_4_queues
+ self.pmdout_list[0].execute_cmd("quit", "# ")
+ break
+ else:
+ if i < self.vf_num:
+ self.pmdout_list[i].start_testpmd(param=tv["param"], ports=[self.sriov_vfs_port[i].pci],
+ prefix='port0vf{}'.format(i))
+ else:
+ # start fourth testpmd failed
+ eal_param = self.dut.create_eal_parameters(fixed_prefix=True, ports=[self.sriov_vfs_port[-1].pci])
+ cmd = "{} ".format(self.app_path) + eal_param + "-- -i " + tv["param"]
+ out = self.pmd_output.execute_cmd(cmd, "#")
+ self.verify(tv["check_param"] in out, "fail: testpmd start successfully")
+ # quit all testpmd
+ self.pmdout_list[0].execute_cmd("quit", "# ")
+ self.pmdout_list[1].execute_cmd("quit", "# ")
+ self.pmdout_list[2].execute_cmd("quit", "# ")
+ if self.vf_num > 3:
+ self.pmdout_list[3].execute_cmd("quit", "# ")
+ self.pmdout_list[4].execute_cmd("quit", "# ")
+ self.pmdout_list[5].execute_cmd("quit", "# ")
+ self.pmdout_list[6].execute_cmd("quit", "# ")
+
+ # case 2: max_vfs_4_queues
elif subcase_name == "test_multi_fdir_among":
self.create_fdir_rule(tv["rule"])
self.check_match_mismatch_pkts(tv)
- elif subcase_name == "test_more_than_128_vfs_4_queues":
+ elif subcase_name == "test_more_than_max_vfs_4_queues":
self.pmd_output.execute_cmd("quit", "#")
out = self.dut.send_expect("echo {} > /sys/bus/pci/devices/{}/sriov_numvfs".format(
- tv["vf_num"][0], self.pf0_pci), "# ")
- self.verify(tv["check_param"] not in out, "fail: create vfs successfully")
+ self.max_vf_num, self.pf0_pci), "# ")
+ self.verify(tv["check_param"] not in out, "fail: create vfs failed")
out = self.dut.send_expect("echo {} > /sys/bus/pci/devices/{}/sriov_numvfs".format(
- tv["vf_num"][1], self.pf0_pci), "# ")
+ self.max_vf_num + 1, self.pf0_pci), "# ")
self.verify(tv["check_param"] in out, "fail: create vfs successfully")
- elif subcase_name == "test_more_than_4_queues_128_vfs":
+ elif subcase_name == "test_more_than_4_queues_max_vfs":
self.pmd_output.execute_cmd("quit", "# ")
out = self.pmd_output.start_testpmd("all", param=tv["param"][0],
ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket)
@@ -387,7 +409,6 @@ class TestLargeVf(TestCase):
def create_pf_rule(self,pmdout, pf_intf, ip, action):
# count: create rules number
queue_list = []
- self.validation_pf_rule(pmdout, pf_intf)
for x in range(10):
queue_list.append(action)
cmd = "ethtool -N {} flow-type udp4 dst-ip 192.168.0.{} src-port 22 action {}".format(pf_intf, ip, action)
@@ -407,6 +428,7 @@ class TestLargeVf(TestCase):
packet = "Ether(dst='{}')/IP(src=RandIP(),dst='192.168.0.{}')/UDP(sport=22,dport=23)/Raw('x'*80)".format(pf_mac, ip)
self.send_packets(packet, 1)
ip += 1
+ time.sleep(1)
out = pmdout.execute_cmd("ethtool -S %s" % pf_intf, "# ")
for queue in range(check_param[0], check_param[1]+1):
packet_str = "rx_queue_%d_packets: (\d+)" % queue
@@ -417,10 +439,11 @@ class TestLargeVf(TestCase):
rule_str = "Filter:.*?(\d+)"
out = pmdout.execute_cmd("ethtool -n %s" % pf_intf, "#")
rule_list = re.findall(rule_str, out)
- for rule in rule_list:
- cmd = "ethtool -N {} delete {}".format(pf_intf, rule)
- pmdout.execute_cmd(cmd, "#")
- self.validation_pf_rule(pmdout, pf_intf)
+ if rule_list:
+ for rule in rule_list:
+ cmd = "ethtool -N {} delete {}".format(pf_intf, rule)
+ pmdout.execute_cmd(cmd, "#")
+ self.validation_pf_rule(pmdout, pf_intf)
def check_iavf_fdir_value(self, out, check_paeam, count, stats=False):
"""
@@ -503,27 +526,31 @@ class TestLargeVf(TestCase):
self.check_txonly_pkts(rxtx_num)
def test_3_vfs_256_queues(self):
- self.session_list = []
- for i in range(3):
- name = self.dut.new_session()
- self.session_list.append(name)
- self.create_iavf(3)
+ self.pmdout_list = []
+ session_list = []
+ for i in range(self.vf_num):
+ session = self.dut.new_session()
+ session_list.append(session)
+ pmdout = PmdOutput(self.dut, session)
+ self.pmdout_list.append(pmdout)
+ self.create_iavf(self.vf_num + 1)
self.launch_testpmd("--rxq=256 --txq=256", total=True)
self.config_testpmd()
self.rte_flow_process(max_vfs_256_queues_3)
- self.dut.close_session(self.session_list)
+ self.dut.close_session(session_list)
- def test_128_vfs_4_queues(self):
- self.create_iavf(128)
+ def test_max_vfs_4_queues(self):
+ self.create_iavf(self.max_vf_num)
self.launch_testpmd("--rxq=4 --txq=4")
self.config_testpmd()
- self.rte_flow_process(max_vfs_4_queues_128)
+ self.rte_flow_process(max_vfs_4_queues)
def tear_down(self):
"""
Run after each test case.
"""
self.pmd_output.execute_cmd("quit", "#")
+ self.dut.kill_all()
self.destroy_iavf()
--
2.17.1
* [dts] [PATCH v1] tests/large_vf:support cvl 25G
@ 2021-01-28 8:13 Hailin Xu
2021-01-28 8:53 ` Xu, HailinX
2021-02-02 8:54 ` Tu, Lijuan
0 siblings, 2 replies; 7+ messages in thread
From: Hailin Xu @ 2021-01-28 8:13 UTC (permalink / raw)
To: dts; +Cc: Hailin Xu
Add a judgment condition for the CVL 25G NIC, because 25G differs from 100G:
1. The maximum number of VFs is different.
2. The number of VFs that can use 256 queues is different.
3. Remove hard-coded values.
Signed-off-by: Hailin Xu <hailinx.xu@intel.com>
---
tests/TestSuite_large_vf.py | 135 +++++++++++++++++++++---------------
1 file changed, 81 insertions(+), 54 deletions(-)
diff --git a/tests/TestSuite_large_vf.py b/tests/TestSuite_large_vf.py
index 77c9fc62..6ff67a22 100755
--- a/tests/TestSuite_large_vf.py
+++ b/tests/TestSuite_large_vf.py
@@ -172,19 +172,18 @@ multi_fdir_among = {
"count": 1000
}
-more_than_4_queues_128_vfs = {
- "name": "test_more_than_4_queues_128_vfs",
+more_than_4_queues_max_vfs = {
+ "name": "test_more_than_4_queues_max_vfs",
"param": ["--txq=8 --rxq=8", "--txq=4 --rxq=4"],
"check_param": "configure queues failed"
}
-more_than_128_vfs_4_queues = {
- "name": "test_more_than_128_vfs_4_queues",
- "vf_num": [128, 129],
+more_than_max_vfs_4_queues = {
+ "name": "test_more_than_max_vfs_4_queues",
"check_param": "-bash: echo: write error: Numerical result out of range"
}
-max_vfs_4_queues_128 = [multi_fdir_among, more_than_4_queues_128_vfs, more_than_128_vfs_4_queues]
+max_vfs_4_queues = [multi_fdir_among, more_than_4_queues_max_vfs, more_than_max_vfs_4_queues]
class TestLargeVf(TestCase):
@@ -203,6 +202,7 @@ class TestLargeVf(TestCase):
self.used_dut_port = self.dut_ports[0]
self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]['intf']
self.pf0_pci = self.dut.ports_info[self.dut_ports[0]]['pci']
+ self.max_vf_num = int(self.dut.send_expect('cat /sys/bus/pci/devices/%s/sriov_totalvfs' % self.pf0_pci, '#'))
self.pf0_mac = self.dut.get_mac_address(0)
self.vf_flag = False
@@ -213,6 +213,9 @@ class TestLargeVf(TestCase):
self.pkt = Packet()
self.pmd_output = PmdOutput(self.dut)
+ self.app_path = self.dut.apps_name["test-pmd"]
+ self.vf_num = 7 if self.max_vf_num > 128 else 3
+
def set_up(self):
"""
Run before each test case.
@@ -241,11 +244,20 @@ class TestLargeVf(TestCase):
self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
self.vf_flag = False
- def launch_testpmd(self, param, total=False):
+ def launch_testpmd(self, param, total=False, retry_times=3):
if total:
param = param + " --total-num-mbufs=500000"
- self.pmd_output.start_testpmd("all", param=param,
- ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket)
+ while retry_times:
+ try:
+ self.pmd_output.start_testpmd("all", param=param,
+ ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket)
+ break
+ except Exception as e:
+ self.logger.info('start testpmd occurred exception: {}'.format(e))
+ retry_times = retry_times - 1
+ time.sleep(1)
+ self.logger.info('try start testpmd the {} times'.format(retry_times))
+
def config_testpmd(self):
self.pmd_output.execute_cmd("set verbose 1")
@@ -289,55 +301,65 @@ class TestLargeVf(TestCase):
self.create_fdir_rule(vectors[0]["rule"])
self.check_match_mismatch_pkts(vectors[0])
elif subcase_name == "test_pf_large_vf_fdir_coexist":
- pmdout = PmdOutput(self.dut, self.session_list[0])
- self.create_pf_rule(pmdout, self.pf0_intf, tv["param"][0], tv["param"][1])
- self.send_pkts_pf_check(pmdout, self.pf0_intf, self.pf0_mac, tv["param"][0], tv["check_param"], tv["count"])
+ self.destroy_pf_rule(self.pmdout_list[0], self.pf0_intf)
+ self.create_pf_rule(self.pmdout_list[0], self.pf0_intf, tv["param"][0], tv["param"][1])
+ self.send_pkts_pf_check(self.pmdout_list[0], self.pf0_intf, self.pf0_mac, tv["param"][0], tv["check_param"], tv["count"])
self.create_fdir_rule(vectors[0]["rule"])
self.check_match_mismatch_pkts(vectors[0])
- self.destroy_pf_rule(pmdout, self.pf0_intf)
+ self.destroy_pf_rule(self.pmdout_list[0], self.pf0_intf)
elif subcase_name == "test_exceed_256_queues":
self.pmd_output.execute_cmd("quit", "#")
- eal_param = "-w {} --file-prefix=port0vf0 -- -i ".format(self.sriov_vfs_port[0].pci)
- cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4 -n 4 " + eal_param + tv["param"][0]
+ eal_param = self.dut.create_eal_parameters(prefix='port0vf0', ports=[self.sriov_vfs_port[0].pci])
+ cmd = "{} ".format(self.app_path) + eal_param + "-- -i " + tv["param"][0]
out = self.pmd_output.execute_cmd(cmd, "# ")
self.verify(tv["check_param"] in out, "fail: testpmd start successfully")
- self.pmd_output.execute_cmd("quit", "#")
self.launch_testpmd(tv["param"][1])
self.check_rxqtxq_number(512, tv["check_param"])
elif subcase_name == "test_more_than_3_vfs_256_queues":
self.pmd_output.execute_cmd("quit", "#")
- self.destroy_iavf()
- self.create_iavf(4)
- # start 4 testpmd uss 256 queues
- for i in range(4):
- if i < 3:
- eal_param = "-w {} --file-prefix=port0vf{} -- -i ".format(self.sriov_vfs_port[i].pci, i)
- cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4 -n 4 " + eal_param + tv["param"]
- self.session_list[i].send_expect(cmd, "testpmd> ")
- else:
- # start fourth testpmd failed
- eal_param = "-w {} --file-prefix=port0vf3 -- -i ".format(self.sriov_vfs_port[3].pci)
- cmd = "x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4 -n 4 " + eal_param + tv[
- "param"]
- out = self.dut.send_command(cmd)
+ # start testpmd use 256 queues
+ for i in range(self.vf_num + 1):
+ if self.max_vf_num == 64:
+ self.pmdout_list[0].start_testpmd(param=tv["param"], ports=[self.sriov_vfs_port[0].pci], prefix='port0vf0')
+ eal_param = self.dut.create_eal_parameters(fixed_prefix=True, ports=[self.sriov_vfs_port[1].pci])
+ cmd = "{} ".format(self.app_path) + eal_param + "-- -i " + tv["param"]
+ out = self.pmd_output.execute_cmd(cmd, "#")
self.verify(tv["check_param"] in out, "fail: testpmd start successfully")
- # quit all testpmd
- self.session_list[0].send_expect("quit", "# ")
- self.session_list[1].send_expect("quit", "# ")
- self.session_list[2].send_expect("quit", "# ")
- # case 2: 128_vfs_4_queues
+ self.pmdout_list[0].execute_cmd("quit", "# ")
+ break
+ else:
+ if i < self.vf_num:
+ self.pmdout_list[i].start_testpmd(param=tv["param"], ports=[self.sriov_vfs_port[i].pci],
+ prefix='port0vf{}'.format(i))
+ else:
+ # start fourth testpmd failed
+ eal_param = self.dut.create_eal_parameters(fixed_prefix=True, ports=[self.sriov_vfs_port[-1].pci])
+ cmd = "{} ".format(self.app_path) + eal_param + "-- -i " + tv["param"]
+ out = self.pmd_output.execute_cmd(cmd, "#")
+ self.verify(tv["check_param"] in out, "fail: testpmd start successfully")
+ # quit all testpmd
+ self.pmdout_list[0].execute_cmd("quit", "# ")
+ self.pmdout_list[1].execute_cmd("quit", "# ")
+ self.pmdout_list[2].execute_cmd("quit", "# ")
+ if self.vf_num > 3:
+ self.pmdout_list[3].execute_cmd("quit", "# ")
+ self.pmdout_list[4].execute_cmd("quit", "# ")
+ self.pmdout_list[5].execute_cmd("quit", "# ")
+ self.pmdout_list[6].execute_cmd("quit", "# ")
+
+ # case 2: max_vfs_4_queues
elif subcase_name == "test_multi_fdir_among":
self.create_fdir_rule(tv["rule"])
self.check_match_mismatch_pkts(tv)
- elif subcase_name == "test_more_than_128_vfs_4_queues":
+ elif subcase_name == "test_more_than_max_vfs_4_queues":
self.pmd_output.execute_cmd("quit", "#")
out = self.dut.send_expect("echo {} > /sys/bus/pci/devices/{}/sriov_numvfs".format(
- tv["vf_num"][0], self.pf0_pci), "# ")
- self.verify(tv["check_param"] not in out, "fail: create vfs successfully")
+ self.max_vf_num, self.pf0_pci), "# ")
+ self.verify(tv["check_param"] not in out, "fail: create vfs failed")
out = self.dut.send_expect("echo {} > /sys/bus/pci/devices/{}/sriov_numvfs".format(
- tv["vf_num"][1], self.pf0_pci), "# ")
+ self.max_vf_num + 1, self.pf0_pci), "# ")
self.verify(tv["check_param"] in out, "fail: create vfs successfully")
- elif subcase_name == "test_more_than_4_queues_128_vfs":
+ elif subcase_name == "test_more_than_4_queues_max_vfs":
self.pmd_output.execute_cmd("quit", "# ")
out = self.pmd_output.start_testpmd("all", param=tv["param"][0],
ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket)
@@ -387,7 +409,6 @@ class TestLargeVf(TestCase):
def create_pf_rule(self,pmdout, pf_intf, ip, action):
# count: create rules number
queue_list = []
- self.validation_pf_rule(pmdout, pf_intf)
for x in range(10):
queue_list.append(action)
cmd = "ethtool -N {} flow-type udp4 dst-ip 192.168.0.{} src-port 22 action {}".format(pf_intf, ip, action)
@@ -407,6 +428,7 @@ class TestLargeVf(TestCase):
packet = "Ether(dst='{}')/IP(src=RandIP(),dst='192.168.0.{}')/UDP(sport=22,dport=23)/Raw('x'*80)".format(pf_mac, ip)
self.send_packets(packet, 1)
ip += 1
+ time.sleep(1)
out = pmdout.execute_cmd("ethtool -S %s" % pf_intf, "# ")
for queue in range(check_param[0], check_param[1]+1):
packet_str = "rx_queue_%d_packets: (\d+)" % queue
@@ -417,10 +439,11 @@ class TestLargeVf(TestCase):
rule_str = "Filter:.*?(\d+)"
out = pmdout.execute_cmd("ethtool -n %s" % pf_intf, "#")
rule_list = re.findall(rule_str, out)
- for rule in rule_list:
- cmd = "ethtool -N {} delete {}".format(pf_intf, rule)
- pmdout.execute_cmd(cmd, "#")
- self.validation_pf_rule(pmdout, pf_intf)
+ if rule_list:
+ for rule in rule_list:
+ cmd = "ethtool -N {} delete {}".format(pf_intf, rule)
+ pmdout.execute_cmd(cmd, "#")
+ self.validation_pf_rule(pmdout, pf_intf)
def check_iavf_fdir_value(self, out, check_paeam, count, stats=False):
"""
@@ -503,27 +526,31 @@ class TestLargeVf(TestCase):
self.check_txonly_pkts(rxtx_num)
def test_3_vfs_256_queues(self):
- self.session_list = []
- for i in range(3):
- name = self.dut.new_session()
- self.session_list.append(name)
- self.create_iavf(3)
+ self.pmdout_list = []
+ session_list = []
+ for i in range(self.vf_num):
+ session = self.dut.new_session()
+ session_list.append(session)
+ pmdout = PmdOutput(self.dut, session)
+ self.pmdout_list.append(pmdout)
+ self.create_iavf(self.vf_num + 1)
self.launch_testpmd("--rxq=256 --txq=256", total=True)
self.config_testpmd()
self.rte_flow_process(max_vfs_256_queues_3)
- self.dut.close_session(self.session_list)
+ self.dut.close_session(session_list)
- def test_128_vfs_4_queues(self):
- self.create_iavf(128)
+ def test_max_vfs_4_queues(self):
+ self.create_iavf(self.max_vf_num)
self.launch_testpmd("--rxq=4 --txq=4")
self.config_testpmd()
- self.rte_flow_process(max_vfs_4_queues_128)
+ self.rte_flow_process(max_vfs_4_queues)
def tear_down(self):
"""
Run after each test case.
"""
self.pmd_output.execute_cmd("quit", "#")
+ self.dut.kill_all()
self.destroy_iavf()
--
2.17.1
* Re: [dts] [PATCH v1] tests/large_vf:support cvl 25G
2021-01-28 8:13 Hailin Xu
@ 2021-01-28 8:53 ` Xu, HailinX
2021-02-02 8:54 ` Tu, Lijuan
1 sibling, 0 replies; 7+ messages in thread
From: Xu, HailinX @ 2021-01-28 8:53 UTC (permalink / raw)
To: Xu, HailinX, dts
[-- Attachment #1: Type: text/plain, Size: 307 bytes --]
Tested-by: Xu, HailinX <hailinx.xu@intel.com>
Regards,
Xu, Hailin
>-----Original Message-----
>From: Hailin Xu <hailinx.xu@intel.com>
>Sent: Thursday, January 28, 2021 4:14 PM
>To: dts@dpdk.org
>Cc: Xu, HailinX <hailinx.xu@intel.com>
>Subject: [dts][PATCH v1] tests/large_vf:support cvl 25G
[-- Attachment #2: TestLargeVf.log --]
[-- Type: application/octet-stream, Size: 25907 bytes --]
14/01/2021 10:09:07 dts:
TEST SUITE : TestLargeVf
14/01/2021 10:09:07 dts: NIC : columbiaville_25g
14/01/2021 10:09:07 dut.10.240.183.67:
14/01/2021 10:09:08 tester:
14/01/2021 10:09:08 dut.10.240.183.67: cat /sys/bus/pci/devices/0000:18:00.0/sriov_totalvfs
14/01/2021 10:09:08 dut.10.240.183.67: 64
14/01/2021 10:09:08 dut.10.240.183.67: modprobe vfio-pci
14/01/2021 10:09:08 dut.10.240.183.67:
14/01/2021 10:09:08 TestLargeVf: Test Case test_3_vfs_256_queues Begin
14/01/2021 10:09:08 dut.10.240.183.67:
14/01/2021 10:09:08 tester:
14/01/2021 10:09:13 dut.10.240.183.67: ls
14/01/2021 10:09:13 dut.10.240.183.67: 0001-kernel-linux-add-igb_uio-mod.patch ABI_VERSION app buildtools config devtools doc dpdk.log drivers eeprom_0.bin ethtool_eeprom_0.bin ethtool_eeprom_0_cat.bin examples iavf_vchnl.c kernel lib license MAINTAINERS Makefile meson.build meson_options.txt README showversion usertools VERSION x86_64-native-linuxapp-gcc
14/01/2021 10:09:13 dut.10.240.183.67: usertools/dpdk-devbind.py --force --bind=ice 0000:18:00.0 0000:18:00.1 0000:18:00.2 0000:18:00.3
14/01/2021 10:09:17 dut.10.240.183.67:
14/01/2021 10:09:20 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.0/vendor
14/01/2021 10:09:20 dut.10.240.183.67: 0x8086
14/01/2021 10:09:20 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.0/device
14/01/2021 10:09:20 dut.10.240.183.67: 0x1889
14/01/2021 10:09:20 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.0/vendor
14/01/2021 10:09:20 dut.10.240.183.67: 0x8086
14/01/2021 10:09:20 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.0/device
14/01/2021 10:09:20 dut.10.240.183.67: 0x1889
14/01/2021 10:09:20 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.1/vendor
14/01/2021 10:09:20 dut.10.240.183.67: 0x8086
14/01/2021 10:09:20 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.1/device
14/01/2021 10:09:20 dut.10.240.183.67: 0x1889
14/01/2021 10:09:20 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.1/vendor
14/01/2021 10:09:20 dut.10.240.183.67: 0x8086
14/01/2021 10:09:20 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.1/device
14/01/2021 10:09:20 dut.10.240.183.67: 0x1889
14/01/2021 10:09:20 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.2/vendor
14/01/2021 10:09:21 dut.10.240.183.67: 0x8086
14/01/2021 10:09:21 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:01.2/device
14/01/2021 10:09:21 dut.10.240.183.67: 0x1889
14/01/2021 10:09:27 dut.10.240.183.67: ifconfig enp24s0f0 up
14/01/2021 10:09:27 dut.10.240.183.67:
14/01/2021 10:09:27 dut.10.240.183.67: ip link set enp24s0f0 vf 0 mac 00:11:22:33:44:55
14/01/2021 10:09:27 dut.10.240.183.67:
14/01/2021 10:09:27 dut.10.240.183.67: x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53 -n 4 -a 0000:18:01.0 --file-prefix=dpdk_49570_20210114100833 -- -i --rxq=256 --txq=256 --total-num-mbufs=500000
14/01/2021 10:09:35 dut.10.240.183.67: EAL: Detected 72 lcore(s)^M
EAL: Detected 2 NUMA nodes^M
EAL: Multi-process socket /var/run/dpdk/dpdk_49570_20210114100833/mp_socket^M
EAL: Selected IOVA mode 'VA'^M
EAL: Probing VFIO support...^M
EAL: VFIO support initialized^M
EAL: using IOMMU type 1 (Type 1)^M
EAL: Probe PCI driver: net_iavf (8086:1889) device: 0000:18:01.0 (socket 0)^M
EAL: No legacy callbacks, legacy socket not created^M
Interactive-mode selected^M
testpmd: create a new mbuf pool <mb_pool_0>: n=500000, size=2176, socket=0^M
testpmd: preferred mempool ops selected: ring_mp_mc^M
^M
Warning! port-topology=paired and odd forward ports number, the last port will pair with itself.^M
^M
Configuring Port 0 (socket 0)^M
^M
Port 0: link state change event^M
^M
Port 0: link state change event^M
^M
Port 0: link state change event^M
14/01/2021 10:09:45 TestLargeVf: ============subcase test_multi_fdir_consistent_queue_group============
14/01/2021 10:09:45 dut.10.240.183.67: flow flush 0
14/01/2021 10:09:45 dut.10.240.183.67: ^M
14/01/2021 10:09:45 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.21 / udp src is 22 / end actions rss queues 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 end / mark id 1 / end
14/01/2021 10:09:45 dut.10.240.183.67: ^M^M
Flow rule #0 created
14/01/2021 10:09:45 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv6 src is 2001::2 / udp / end actions rss queues 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 end / mark id 2 / end
14/01/2021 10:09:45 dut.10.240.183.67: ^M^M
Flow rule #1 created
14/01/2021 10:09:45 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv4 / tcp src is 22 / end actions rss queues 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 end / mark id 3 / end
14/01/2021 10:09:45 dut.10.240.183.67: ^M^M
Flow rule #2 created
14/01/2021 10:09:45 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv6 dst is 2001::2 / tcp dst is 23 / end actions rss queues 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 end / mark id 4 / end
14/01/2021 10:09:45 dut.10.240.183.67: ^M^M
Flow rule #3 created
14/01/2021 10:09:48 dut.10.240.183.67: port 0/queue 61: received 1 packets^M
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0xa5a426bd - RSS queue=0x3d - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x3d^M
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN ^M
port 0/queue 10: received 1 packets^M
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0x645d720a - RSS queue=0xa - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0xa^M
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN ^M
port 0/queue 50: received 1 packets^M
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0xddc6a932 - RSS queue=0x32 - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x32^M
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN ^M
port 0/queue 63: received 1 packets^M
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0xe7f5023f - RSS queue=0x3f - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x3f^M
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN ^M
port 0/queue 25: received 1 packets^M
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0xb9f4bed9 - RSS queue=0x19 - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x19^M
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN ^M
port 0/queue 5: received 1 packets^M
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0xfda1d5c5 - RSS queue=0x5 - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x5^M
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN ^M
port 0/queue 1: received 1 packets^M
NIC statistics for port 0 cleared
14/01/2021 10:10:26 TestLargeVf: ============subcase test_multi_fdir_inconsistent_queue_group============
14/01/2021 10:10:26 dut.10.240.183.67: flow flush 0
14/01/2021 10:10:26 dut.10.240.183.67: ^M
14/01/2021 10:10:26 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.21 / udp src is 22 / end actions rss queues 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 end / mark id 1 / end
14/01/2021 10:10:26 dut.10.240.183.67: ^M^M
Flow rule #0 created
14/01/2021 10:10:26 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv6 src is 2001::2 / udp / end actions rss queues 80 81 82 83 84 85 86 87 end / mark id 2 / end
14/01/2021 10:10:26 dut.10.240.183.67: ^M^M
Flow rule #1 created
14/01/2021 10:10:26 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv4 / tcp src is 22 / end actions rss queues 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 end / mark id 3 / end
14/01/2021 10:10:26 dut.10.240.183.67: ^M^M
Flow rule #2 created
14/01/2021 10:10:26 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv6 dst is 2001::2 / tcp dst is 23 / end actions rss queues 252 253 254 255 end / mark id 4 / end
14/01/2021 10:10:26 dut.10.240.183.67: ^M^M
Flow rule #3 created
14/01/2021 10:10:29 dut.10.240.183.67: port 0/queue 16: received 1 packets^M
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0x6cb209ab - RSS queue=0x10 - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x10^M
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN ^M
port 0/queue 11: received 1 packets^M
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0x6ba885c6 - RSS queue=0xb - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0xb^M
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN ^M
port 0/queue 7: received 1 packets^M
NIC statistics for port 0 cleared
14/01/2021 10:11:07 TestLargeVf: ============subcase test_basic_rxtx============
14/01/2021 10:11:07 dut.10.240.183.67: flow flush 0
14/01/2021 10:11:07 dut.10.240.183.67: ^M
14/01/2021 10:11:07 dut.10.240.183.67: stop
14/01/2021 10:11:07 dut.10.240.183.67: ^M^M
Telling cores to ...^M
Waiting for lcores to finish...^M
^M
---------------------- Forward statistics for port 0 ----------------------^M
RX-packets: 0 RX-dropped: 0 RX-total: 0^M
TX-packets: 0 TX-dropped: 0 TX-total: 0^M
----------------------------------------------------------------------------^M
^M
+++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++^M
RX-packets: 0 RX-dropped: 0 RX-total: 0^M
TX-packets: 0 TX-dropped: 0 TX-total: 0^M
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++^M
^M
Done.
14/01/2021 10:11:07 dut.10.240.183.67: set fwd txonly
Set txonly packet forwarding mode
14/01/2021 10:11:07 dut.10.240.183.67: start
14/01/2021 10:11:07 dut.10.240.183.67: ^M^M
txonly packet forwarding - ports=1 - cores=1 - streams=256 - NUMA support enabled, MP allocation mode: native^M
Logical Core 2 (socket 0) forwards packets on 256 streams:^M
RX P=0/Q=0 (socket 0) -> TX P=0/Q=0 (socket 0) peer=02:00:00:00:00:00^M
RX P=0/Q=1 (socket 0) -> TX P=0/Q=1 (socket 0) peer=02:00:00:00:00:00^M
RX P=0/Q=2 (socket 0) -> TX P=0/Q=2 (socket 0) peer=02:00:00:00:00:00^M
RX P=0/Q=3 (socket 0) -> TX P=0/Q=3 (socket 0) peer=02:00:00:00:00:00^M
RX P=0/Q=4 (socket 0) -> TX P=0/Q=4 (socket 0) peer=02:00:00:00:00:00^M
RX P=0/Q=5 (socket 0) -> TX P=0/Q=5 (socket 0) peer=02:00:00:00:00:00^M
RX P=0/Q=6 (socket 0) -> TX P=0/Q=6 (socket 0) peer=02:00:00:00:00:00^M
RX P=0/Q=7 (socket 0) -> TX P=0/Q=7 (socket 0) peer=02:00:00:00:00:00^M
RX P=0/Q=8 (socket 0) -> TX P=0/Q=8 (socket 0) peer=02:00:00:00:00:00^M
RX P=0/Q=9 (socket 0) -> TX P=0/Q=9 (socket 0) peer=02:00:00:00:00:00^M
NIC statistics for port 0 cleared
14/01/2021 10:11:17 TestLargeVf: ============subcase test_different_queues_switch============
14/01/2021 10:11:17 dut.10.240.183.67: stop
14/01/2021 10:11:17 dut.10.240.183.67:
Telling cores to stop...
Waiting for lcores to finish...

---------------------- Forward statistics for port 0 ----------------------
RX-packets: 0 RX-dropped: 0 RX-total: 0
TX-packets: 0 TX-dropped: 0 TX-total: 0
----------------------------------------------------------------------------

+++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
RX-packets: 0 RX-dropped: 0 RX-total: 0
TX-packets: 0 TX-dropped: 0 TX-total: 0
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Done.
14/01/2021 10:11:17 dut.10.240.183.67: port stop all
14/01/2021 10:11:18 dut.10.240.183.67:
Stopping ports...
NIC statistics for port 0 cleared
14/01/2021 10:13:55 TestLargeVf: ============subcase test_pf_large_vf_fdir_coexist============
14/01/2021 10:13:58 dut.10.240.183.67: flow flush 0
14/01/2021 10:13:58 dut.10.240.183.67:
14/01/2021 10:13:58 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.21 / udp src is 22 / end actions rss queues 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 end / mark id 1 / end
14/01/2021 10:13:58 dut.10.240.183.67:
Flow rule #0 created
14/01/2021 10:13:58 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv6 src is 2001::2 / udp / end actions rss queues 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 end / mark id 2 / end
14/01/2021 10:13:58 dut.10.240.183.67:
Flow rule #1 created
14/01/2021 10:13:58 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv4 / tcp src is 22 / end actions rss queues 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 end / mark id 3 / end
14/01/2021 10:13:58 dut.10.240.183.67:
Flow rule #2 created
NIC statistics for port 0 cleared
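Each of the coexist rules above spreads one flow type across a 64-queue group of the 256-queue VF. Writing those queue lists out by hand is error-prone, so they are better generated programmatically; a sketch in the suite's Python (build_rss_rule is a hypothetical helper name, not part of the suite):

    def build_rss_rule(port, pattern, group, mark_id, group_size=64):
        # queues 0-63 for group 0, 64-127 for group 1, and so on
        queues = " ".join(str(q) for q in
                          range(group * group_size, (group + 1) * group_size))
        return ("flow create %d ingress pattern %s / end "
                "actions rss queues %s end / mark id %d / end"
                % (port, pattern, queues, mark_id))

    # reproduces rule #0 above
    cmd = build_rss_rule(0, "eth / ipv4 dst is 192.168.0.21 / udp src is 22", 0, 1)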
14/01/2021 10:14:40 TestLargeVf: ============subcase test_exceed_256_queues============
14/01/2021 10:14:40 dut.10.240.183.67: quit
14/01/2021 10:14:43 dut.10.240.183.67:
Telling cores to stop...
Waiting for lcores to finish...

---------------------- Forward statistics for port 0 ----------------------
RX-packets: 0 RX-dropped: 0 RX-total: 0
TX-packets: 0 TX-dropped: 0 TX-total: 0
----------------------------------------------------------------------------

+++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
RX-packets: 0 RX-dropped: 0 RX-total: 0
TX-packets: 0 TX-dropped: 0 TX-total: 0
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Done.

Stopping port 0...
Stopping ports...
14/01/2021 10:15:01 TestLargeVf: ============subcase test_more_than_3_vfs_256_queues============
14/01/2021 10:15:01 dut.10.240.183.67: quit
14/01/2021 10:15:03 dut.10.240.183.67:

Stopping port 0...
Stopping ports...
Done

Shutting down port 0...
Closing ports...
Port 0 is closed
Done

Bye...
14/01/2021 10:15:19 dut.10.240.183.67: x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1,2 -n 4 -a 0000:18:01.1 --file-prefix=dpdk_49570_20210114100833 -- -i --txq=256 --rxq=256
14/01/2021 10:15:21 dut.10.240.183.67: EAL: Detected 72 lcore(s)
EAL: Detected 2 NUMA nodes
EAL: Multi-process socket /var/run/dpdk/dpdk_49570_20210114100833/mp_socket
EAL: Selected IOVA mode 'VA'
EAL: Probing VFIO support...
EAL: VFIO support initialized
EAL: using IOMMU type 1 (Type 1)
EAL: Probe PCI driver: net_iavf (8086:1889) device: 0000:18:01.1 (socket 0)
EAL: No legacy callbacks, legacy socket not created
Interactive-mode selected
testpmd: create a new mbuf pool <mb_pool_0>: n=155456, size=2176, socket=0
testpmd: preferred mempool ops selected: ring_mp_mc

Warning! port-topology=paired and odd forward ports number, the last port will pair with itself.

Configuring Port 0 (socket 0)
iavf_request_queues(): fail to execute command OP_REQUEST_QUEUES
iavf_queues_req_reset(): request queues from PF failed
Port0 dev_configure = -1
Fail to configure port 0
EAL: Error - exiting with code: 1
Cause: Start ports failed
14/01/2021 10:15:23 TestLargeVf: Test Case test_3_vfs_256_queues Result PASSED:
14/01/2021 10:15:23 dut.10.240.183.67: quit
14/01/2021 10:15:23 dut.10.240.183.67: bash: : command not found...
Similar command is: 'quot'
14/01/2021 10:15:23 dut.10.240.183.67: kill_all: called by dut and prefix list has value.
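The OP_REQUEST_QUEUES failure above is the pass condition: only a limited number of VFs per PF may request 256 queues, so configuring one VF beyond that limit must make testpmd exit. A hedged sketch of such a negative check with the helpers from this suite (the command line mirrors the log; using the last created VF is an assumption):

    cmd = ("%s -l 1,2 -n 4 -a %s --file-prefix=large_vf -- -i --txq=256 --rxq=256"
           % (self.app_path, self.sriov_vfs_port[-1].pci))
    out = self.dut.send_expect(cmd, "#", timeout=60)
    # the PF refuses the queue request, so testpmd must fail to start
    self.verify("fail to execute command OP_REQUEST_QUEUES" in out,
                "extra VF unexpectedly got 256 queues")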
14/01/2021 10:15:29 TestLargeVf: Test Case test_max_vfs_4_queues Begin
14/01/2021 10:15:29 dut.10.240.183.67:
14/01/2021 10:15:29 tester:
14/01/2021 10:15:29 dut.10.240.183.67: ls
14/01/2021 10:15:29 dut.10.240.183.67: 0001-kernel-linux-add-igb_uio-mod.patch ABI_VERSION app buildtools config devtools doc dpdk.log drivers eeprom_0.bin ethtool_eeprom_0.bin ethtool_eeprom_0_cat.bin examples iavf_vchnl.c kernel lib license MAINTAINERS Makefile meson.build meson_options.txt README showversion usertools VERSION x86_64-native-linuxapp-gcc
14/01/2021 10:15:29 dut.10.240.183.67: usertools/dpdk-devbind.py --force --bind=ice 0000:18:00.0 0000:18:00.1 0000:18:00.2 0000:18:00.3
14/01/2021 10:15:30 dut.10.240.183.67: Notice: 0000:18:00.0 already bound to driver ice, skipping
Notice: 0000:18:00.1 already bound to driver ice, skipping
Notice: 0000:18:00.2 already bound to driver ice, skipping
Notice: 0000:18:00.3 already bound to driver ice, skipping
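Re-binding the PF ports to the kernel ice driver is a precondition for re-creating VFs through sysfs. Outside the suite the same step can be scripted directly; a sketch, with the PCI addresses taken from this log:

    import subprocess

    pfs = ["0000:18:00.0", "0000:18:00.1", "0000:18:00.2", "0000:18:00.3"]
    # --force unbinds from any current driver before binding to ice
    subprocess.run(["usertools/dpdk-devbind.py", "--force", "--bind=ice"] + pfs,
                   check=True)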
14/01/2021 10:15:42 dut.10.240.183.67: cat /sys/bus/pci/devices/0000\:18\:02.3/vendor
14/01/2021 10:15:42 dut.10.240.183.67: 0x8086
TX offloads=0x10000 - TX RS bit threshold=32
14/01/2021 10:17:46 TestLargeVf: ============subcase test_multi_fdir_among============
14/01/2021 10:17:46 dut.10.240.183.67: flow flush 0
14/01/2021 10:17:46 dut.10.240.183.67:
14/01/2021 10:17:46 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.21 / udp src is 22 / end actions rss queues 0 1 end / mark id 1 / end
14/01/2021 10:17:46 dut.10.240.183.67:
Flow rule #0 created
14/01/2021 10:17:46 dut.10.240.183.67: flow create 0 ingress pattern eth / ipv6 src is 2001::2 / udp / end actions rss queues 2 3 end / mark id 2 / end
14/01/2021 10:17:46 dut.10.240.183.67:
Flow rule #1 created
14/01/2021 10:17:49 dut.10.240.183.67: port 0/queue 1: received 1 packets
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0x28be8ad9 - RSS queue=0x1 - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x1
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN
port 0/queue 1: received 1 packets
src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - type=0x0800 - length=122 - nb_segs=1 - RSS hash=0x4207f809 - RSS queue=0x1 - FDIR matched ID=0x1 - hw ptype: L2_ETHER L3_IPV4_EXT_UNKNOWN L4_UDP - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x1
ol_flags: PKT_RX_RSS_HASH PKT_RX_FDIR PKT_RX_L4_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD PKT_RX_FDIR_ID PKT_RX_OUTER_L4_CKSUM_UNKNOWN
port 0/queue 1: received 1 packets
NIC statistics for port 0 cleared
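The verbose output above is what the mark check keys on: each received packet must report the FDIR mark id of the matching rule and land in one of that rule's RSS queues. A self-contained sketch of that parse (check_mark is a hypothetical helper, not the suite's code; the sample string is taken from the log):

    import re

    def check_mark(output, mark_id, queues):
        # matches e.g. "... RSS queue=0x1 - FDIR matched ID=0x1 ..." lines
        m = re.search(r"RSS queue=(0x[0-9a-f]+) - FDIR matched ID=(0x[0-9a-f]+)", output)
        assert m, "no FDIR mark reported in verbose output"
        assert int(m.group(2), 16) == mark_id, "wrong mark id"
        assert int(m.group(1), 16) in queues, "packet left the rule's queue group"

    sample = ("src=00:1E:67:56:C8:2B - dst=00:11:22:33:44:55 - "
              "RSS queue=0x1 - FDIR matched ID=0x1 - Receive queue=0x1")
    check_mark(sample, mark_id=1, queues={0, 1})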
14/01/2021 10:18:05 TestLargeVf: ============subcase test_more_than_4_queues_max_vfs============
14/01/2021 10:18:05 dut.10.240.183.67: quit
14/01/2021 10:18:06 dut.10.240.183.67:
Telling cores to stop...
Waiting for lcores to finish...

---------------------- Forward statistics for port 0 ----------------------
RX-packets: 0 RX-dropped: 0 RX-total: 0
TX-packets: 0 TX-dropped: 0 TX-total: 0
----------------------------------------------------------------------------

+++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
RX-packets: 0 RX-dropped: 0 RX-total: 0
TX-packets: 0 TX-dropped: 0 TX-total: 0
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Done.

Stopping port 0...
Stopping ports...
Done
14/01/2021 10:18:30 TestLargeVf: ============subcase test_more_than_max_vfs_4_queues============
14/01/2021 10:18:30 dut.10.240.183.67: quit
14/01/2021 10:18:32 dut.10.240.183.67:

Stopping port 0...
Stopping ports...
Done

Shutting down port 0...
Closing ports...
iavf_execute_vf_cmd(): No response or return failure (-5) for cmd 9
iavf_disable_queues(): Failed to execute command of OP_DISABLE_QUEUES
iavf_stop_queues(): Fail to stop queues
iavf_stop_queues(): Fail to stop queues
Port 0 is closed
Done

Bye...
14/01/2021 10:18:32 dut.10.240.183.67: echo 64 > /sys/bus/pci/devices/0000:18:00.0/sriov_numvfs
14/01/2021 10:18:32 dut.10.240.183.67:
14/01/2021 10:18:32 dut.10.240.183.67: echo 65 > /sys/bus/pci/devices/0000:18:00.0/sriov_numvfs
14/01/2021 10:18:32 dut.10.240.183.67: -bash: echo: write error: Numerical result out of range
14/01/2021 10:18:32 TestLargeVf: Test Case test_max_vfs_4_queues Result PASSED:
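The write error above is the expected result: the kernel caps sriov_numvfs at the sriov_totalvfs value the device advertises, which is also how the patch derives max_vf_num in set_up_all. A sketch of the same check outside the suite, assuming the sysfs paths from this log and root privileges:

    import errno

    pf = "0000:18:00.0"  # PF under test in this log
    with open("/sys/bus/pci/devices/%s/sriov_totalvfs" % pf) as f:
        limit = int(f.read())
    try:
        with open("/sys/bus/pci/devices/%s/sriov_numvfs" % pf, "w") as f:
            f.write(str(limit + 1))
    except OSError as e:
        # any value above sriov_totalvfs is rejected with ERANGE, which
        # bash reports as "Numerical result out of range"
        assert e.errno == errno.ERANGE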
* Re: [dts] [PATCH v1] tests/large_vf:support cvl 25G
2021-01-28 8:13 Hailin Xu
2021-01-28 8:53 ` Xu, HailinX
@ 2021-02-02 8:54 ` Tu, Lijuan
1 sibling, 0 replies; 7+ messages in thread
From: Tu, Lijuan @ 2021-02-02 8:54 UTC (permalink / raw)
To: Xu, HailinX, dts; +Cc: Xu, HailinX
> Add a judgment condition for the CVL 25G network card, because 25G
> differs from 100G:
> 1. The max number of VFs is different
> 2. The number of VFs that can use 256 queues is different
> 3. Fix hard-coded values
>
> Signed-off-by: Hailin Xu <hailinx.xu@intel.com>
Applied, thanks