From: lihong <lihongx.ma@intel.com>
To: dts@dpdk.org
Cc: lihong <lihongx.ma@intel.com>
Subject: [dts] [PATCH V1] tests/loopback_multi_queues: update according to test plan
Date: Wed, 17 Jul 2019 09:34:51 +0800
Message-ID: <1563327291-8026-1-git-send-email-lihongx.ma@intel.com>
1. Remove the test case for the virtio 1.1 in-order path.
2. Run every case with both 1 and 8 queues, and verify that the 8-queue
   throughput is eight times the 1-queue value, within a 10% tolerance
   (see the sketch after the diffstat).
3. Optimize the code.
Signed-off-by: lihong <lihongx.ma@intel.com>
---
tests/TestSuite_loopback_multi_queues.py | 196 ++++++++++++++++++-------------
1 file changed, 116 insertions(+), 80 deletions(-)
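
The scaling check in point 2 of the commit message reduces to the
comparison sketched below. This is a minimal illustration only: the
helper and variable names (is_linear, mpps_1q, mpps_8q) are
hypothetical and do not appear in the patch, which implements the same
logic in verify_linear_for_multi_queue().

    # Hypothetical sketch of the 8-queue linearity check.
    def is_linear(mpps_1q, mpps_8q, tolerance=0.1):
        # 8 queues should reach roughly 8x the 1-queue throughput,
        # allowing a 10% shortfall (the 'drop' in the patch).
        return mpps_8q >= mpps_1q * 8 * (1 - tolerance)
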
diff --git a/tests/TestSuite_loopback_multi_queues.py b/tests/TestSuite_loopback_multi_queues.py
index 5ab17fd..b6c3d25 100644
--- a/tests/TestSuite_loopback_multi_queues.py
+++ b/tests/TestSuite_loopback_multi_queues.py
@@ -31,10 +31,9 @@
"""
DPDK Test suite.
-Test vhost/virtio-user loopback multi-queues on 8 tx/rx path.
+Test vhost/virtio-user loopback multi-queues on 7 tx/rx paths.
Includes Mergeable, Normal, Vector_RX, Inorder mergeable,
-Inorder no-mergeable, Virtio 1.1 mergeable, Virtio 1.1 no-mergeable Path,
-Virtio 1.1 inorder no-mergeable Path.
+Inorder no-mergeable, Virtio 1.1 mergeable, Virtio 1.1 no-mergeable paths.
"""
import utils
@@ -50,19 +49,20 @@ class TestLoopbackMultiQueues(TestCase):
Run at the start of each test suite.
"""
self.frame_sizes = [64, 128, 256, 512, 1024, 1518]
- self.core_config = "1S/18C/1T"
- self.nb_cores = 2
- self.queue_number = 2
+ self.verify_queue = [1, 8]
self.cores_num = len([n for n in self.dut.cores if int(n['socket']) == 0])
- self.verify(self.cores_num >= 18,
- "There has not enought cores to test this case")
- self.core_list = self.dut.get_core_list(self.core_config)
self.logger.info("you can config packet_size in file %s.cfg," % self.suite_name + \
"in region 'suite' like packet_sizes=[64, 128, 256]")
# get the frame_sizes from cfg file
if 'packet_sizes' in self.get_suite_cfg():
self.frame_sizes = self.get_suite_cfg()['packet_sizes']
+        # set the socket-mem argument based on the number of CPU sockets
+ if len(set([int(core['socket']) for core in self.dut.cores])) == 1:
+ self.socket_mem = '1024'
+ else:
+ self.socket_mem = '1024,1024'
+
def set_up(self):
"""
Run before each test case.
@@ -70,8 +70,9 @@ class TestLoopbackMultiQueues(TestCase):
# Prepare the result table
self.dut.send_expect("rm -rf ./vhost-net*", "#")
self.dut.send_expect("killall -s INT testpmd", "#")
- self.table_header = ["Frame", "Mode", "Throughput(Mpps)"]
+ self.table_header = ["Frame", "Mode", "Throughput(Mpps)", "Queue Number"]
self.result_table_create(self.table_header)
+ self.data_verify = {}
self.vhost = self.dut.new_session(suite="vhost")
self.virtio_user = self.dut.new_session(suite="virtio-user")
@@ -80,6 +81,11 @@ class TestLoopbackMultiQueues(TestCase):
"""
get the coremask about vhost and virito depend on the queue number
"""
+ self.core_config = "1S/%dC/1T" % (2*self.nb_cores+2)
+ self.verify(self.cores_num >= (2*self.nb_cores+2),
+                   "There are not enough cores to test case %s" %
+ self.running_case)
+ self.core_list = self.dut.get_core_list(self.core_config)
self.core_list_user = self.core_list[0:self.nb_cores + 1]
self.core_list_host = self.core_list[self.nb_cores + 1:2 * self.nb_cores + 2]
self.core_mask_user = utils.create_mask(self.core_list_user)
@@ -89,13 +95,13 @@ class TestLoopbackMultiQueues(TestCase):
"""
start testpmd on vhost
"""
- command_line_client = self.dut.target + "/app/testpmd -n %d -c %s --socket-mem 1024,1024" + \
+ command_line_client = self.dut.target + "/app/testpmd -n %d -c %s --socket-mem %s" + \
" --legacy-mem --no-pci --file-prefix=vhost --vdev " + \
"'net_vhost0,iface=vhost-net,queues=%d' -- -i --nb-cores=%d " + \
"--rxq=%d --txq=%d --txd=1024 --rxd=1024"
command_line_client = command_line_client % (
- self.dut.get_memory_channels(), self.core_mask_host, self.queue_number,
- self.nb_cores, self.queue_number, self.queue_number)
+ self.dut.get_memory_channels(), self.core_mask_host, self.socket_mem,
+ self.queue_number, self.nb_cores, self.queue_number, self.queue_number)
self.vhost.send_expect(command_line_client, "testpmd> ", 120)
self.vhost.send_expect("set fwd mac", "testpmd> ", 120)
@@ -104,12 +110,13 @@ class TestLoopbackMultiQueues(TestCase):
start testpmd on virtio
"""
command_line_user = self.dut.target + "/app/testpmd -n %d -c %s " + \
- " --socket-mem 1024,1024 --legacy-mem --no-pci --file-prefix=virtio " + \
+ " --socket-mem %s --legacy-mem --no-pci --file-prefix=virtio " + \
"--vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,queues=%d,%s " + \
"-- -i %s --rss-ip --nb-cores=%d --rxq=%d --txq=%d --txd=1024 --rxd=1024"
command_line_user = command_line_user % (
- self.dut.get_memory_channels(), self.core_mask_user, self.queue_number,
- args["version"], args["path"], self.nb_cores, self.queue_number, self.queue_number)
+ self.dut.get_memory_channels(), self.core_mask_user, self.socket_mem,
+ self.queue_number, args["version"], args["path"], self.nb_cores,
+ self.queue_number, self.queue_number)
self.virtio_user.send_expect(command_line_user, "testpmd> ", 120)
self.virtio_user.send_expect("set fwd mac", "testpmd> ", 120)
self.virtio_user.send_expect("start", "testpmd> ", 120)
@@ -134,8 +141,13 @@ class TestLoopbackMultiQueues(TestCase):
results_row = [frame_size]
results_row.append(case_info)
results_row.append(Mpps)
+ results_row.append(self.queue_number)
self.result_table_add(results_row)
+        # record the 64-byte packet result for the linearity check
+ if frame_size == 64:
+ self.data_verify['queue%d-64' % self.queue_number] = Mpps
+
def check_packets_of_each_queue(self, frame_size):
"""
check each queue has receive packets
@@ -165,8 +177,18 @@ class TestLoopbackMultiQueues(TestCase):
self.vhost.send_expect("start tx_first 32", "testpmd> ", 30)
Mpps = self.calculate_avg_throughput()
self.update_result_table(frame_size, case_info, Mpps)
- self.check_packets_of_each_queue(frame_size)
- self.result_table_print()
+ if self.queue_number > 1:
+ self.check_packets_of_each_queue(frame_size)
+
+    def verify_linear_for_multi_queue(self):
+        """
+        verify that the throughput of 8 queues is eight times that of
+        1 queue, allowing a 10% tolerance for the 8-queue value
+        """
+ if self.data_verify:
+            drop = self.data_verify['queue1-64'] * 8 * 0.1
+            self.verify(self.data_verify['queue8-64'] >= self.data_verify['queue1-64'] * 8 - drop,
+                'Multi-queue throughput does not scale linearly for %s' % self.running_case)
def close_all_testpmd(self):
"""
@@ -188,13 +210,17 @@ class TestLoopbackMultiQueues(TestCase):
"""
virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=1",
"path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
- self.nb_cores = 2
- self.queue_number = 2
- self.get_core_mask()
- self.start_vhost_testpmd()
- self.start_virtio_testpmd(virtio_pmd_arg)
- self.send_and_verify("virtio_1.1 mergeable on")
- self.close_all_testpmd()
+ for i in self.verify_queue:
+ self.nb_cores = i
+ self.queue_number = i
+ self.get_core_mask()
+ self.start_vhost_testpmd()
+ self.start_virtio_testpmd(virtio_pmd_arg)
+ self.send_and_verify("virtio_1.1 mergeable on")
+ self.close_all_testpmd()
+
+ self.result_table_print()
+        self.verify_linear_for_multi_queue()
def test_loopback_multi_queue_virtio11_normal(self):
"""
@@ -202,27 +228,17 @@ class TestLoopbackMultiQueues(TestCase):
"""
virtio_pmd_arg = {"version": "in_order=0,packed_vq=1,mrg_rxbuf=0",
"path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
- self.nb_cores = 2
- self.queue_number = 2
- self.get_core_mask()
- self.start_vhost_testpmd()
- self.start_virtio_testpmd(virtio_pmd_arg)
- self.send_and_verify("virtio_1.1 normal")
- self.close_all_testpmd()
+ for i in self.verify_queue:
+ self.nb_cores = i
+ self.queue_number = i
+ self.get_core_mask()
+ self.start_vhost_testpmd()
+ self.start_virtio_testpmd(virtio_pmd_arg)
+ self.send_and_verify("virtio_1.1 normal")
+ self.close_all_testpmd()
- def test_loopback_multi_queue_virtio11_inorder(self):
- """
- performance for Vhost PVP virtio1.1 inorder Path.
- """
- virtio_pmd_arg = {"version": "in_order=1,packed_vq=1,mrg_rxbuf=0",
- "path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
- self.nb_cores = 2
- self.queue_number = 2
- self.get_core_mask()
- self.start_vhost_testpmd()
- self.start_virtio_testpmd(virtio_pmd_arg)
- self.send_and_verify("virtio_1.1 inorder")
- self.close_all_testpmd()
+ self.result_table_print()
+        self.verify_linear_for_multi_queue()
def test_loopback_multi_queue_inorder_mergeable(self):
"""
@@ -230,13 +246,17 @@ class TestLoopbackMultiQueues(TestCase):
"""
virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=1",
"path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
- self.nb_cores = 2
- self.queue_number = 2
- self.get_core_mask()
- self.start_vhost_testpmd()
- self.start_virtio_testpmd(virtio_pmd_arg)
- self.send_and_verify("inoder mergeable on")
- self.close_all_testpmd()
+ for i in self.verify_queue:
+ self.nb_cores = i
+ self.queue_number = i
+ self.get_core_mask()
+ self.start_vhost_testpmd()
+ self.start_virtio_testpmd(virtio_pmd_arg)
+            self.send_and_verify("inorder mergeable on")
+ self.close_all_testpmd()
+
+ self.result_table_print()
+        self.verify_linear_for_multi_queue()
def test_loopback_multi_queue_inorder_no_mergeable(self):
"""
@@ -244,13 +264,17 @@ class TestLoopbackMultiQueues(TestCase):
"""
virtio_pmd_arg = {"version": "packed_vq=0,in_order=1,mrg_rxbuf=0",
"path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
- self.nb_cores = 2
- self.queue_number = 2
- self.get_core_mask()
- self.start_vhost_testpmd()
- self.start_virtio_testpmd(virtio_pmd_arg)
- self.send_and_verify("inoder mergeable off")
- self.close_all_testpmd()
+ for i in self.verify_queue:
+ self.nb_cores = i
+ self.queue_number = i
+ self.get_core_mask()
+ self.start_vhost_testpmd()
+ self.start_virtio_testpmd(virtio_pmd_arg)
+            self.send_and_verify("inorder mergeable off")
+ self.close_all_testpmd()
+
+ self.result_table_print()
+        self.verify_linear_for_multi_queue()
def test_loopback_mulit_queue_mergeable(self):
"""
@@ -258,13 +282,17 @@ class TestLoopbackMultiQueues(TestCase):
"""
virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=1",
"path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
- self.nb_cores = 8
- self.queue_number = 8
- self.get_core_mask()
- self.start_vhost_testpmd()
- self.start_virtio_testpmd(virtio_pmd_arg)
- self.send_and_verify("virito mergeable")
- self.close_all_testpmd()
+ for i in self.verify_queue:
+ self.nb_cores = i
+ self.queue_number = i
+ self.get_core_mask()
+ self.start_vhost_testpmd()
+ self.start_virtio_testpmd(virtio_pmd_arg)
+            self.send_and_verify("virtio mergeable")
+ self.close_all_testpmd()
+
+ self.result_table_print()
+        self.verify_linear_for_multi_queue()
def test_loopback_multi_queue_normal(self):
"""
@@ -272,13 +300,17 @@ class TestLoopbackMultiQueues(TestCase):
"""
virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
"path": "--tx-offloads=0x0 --enable-hw-vlan-strip"}
- self.nb_cores = 8
- self.queue_number = 8
- self.get_core_mask()
- self.start_vhost_testpmd()
- self.start_virtio_testpmd(virtio_pmd_arg)
- self.send_and_verify("virtio normal")
- self.close_all_testpmd()
+ for i in self.verify_queue:
+ self.nb_cores = i
+ self.queue_number = i
+ self.get_core_mask()
+ self.start_vhost_testpmd()
+ self.start_virtio_testpmd(virtio_pmd_arg)
+ self.send_and_verify("virtio normal")
+ self.close_all_testpmd()
+
+ self.result_table_print()
+        self.verify_linear_for_multi_queue()
def test_loopback_multi_queue_vector_rx(self):
"""
@@ -286,13 +318,17 @@ class TestLoopbackMultiQueues(TestCase):
"""
virtio_pmd_arg = {"version": "packed_vq=0,in_order=0,mrg_rxbuf=0",
"path": "--tx-offloads=0x0"}
- self.nb_cores = 8
- self.queue_number = 8
- self.get_core_mask()
- self.start_vhost_testpmd()
- self.start_virtio_testpmd(virtio_pmd_arg)
- self.send_and_verify("virito vector rx")
- self.close_all_testpmd()
+ for i in self.verify_queue:
+ self.nb_cores = i
+ self.queue_number = i
+ self.get_core_mask()
+ self.start_vhost_testpmd()
+ self.start_virtio_testpmd(virtio_pmd_arg)
+            self.send_and_verify("virtio vector rx")
+ self.close_all_testpmd()
+
+ self.result_table_print()
+        self.verify_linear_for_multi_queue()
def tear_down(self):
"""
--
2.7.4