From: Wei Ling <weix.ling@intel.com>
To: dts@dpdk.org
Cc: Wei Ling <weix.ling@intel.com>
Subject: [dts][PATCH V2 2/2] tests/vm2vm_virtio_pmd_cbdma: modify testsuite to test virtio dequeue
Date: Fri, 29 Jul 2022 03:27:28 -0400 [thread overview]
Message-ID: <20220729072728.1007912-1-weix.ling@intel.com> (raw)
Since DPDK 22.07, virtio supports async dequeue for the split and packed ring
paths, so modify the vm2vm_virtio_pmd_cbdma testsuite to test the split and
packed ring async dequeue feature.
Signed-off-by: Wei Ling <weix.ling@intel.com>
---
tests/TestSuite_vm2vm_virtio_pmd_cbdma.py | 546 +++++++++++++++++-----
1 file changed, 420 insertions(+), 126 deletions(-)
diff --git a/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py b/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py
index b926534e..f64032e2 100644
--- a/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py
+++ b/tests/TestSuite_vm2vm_virtio_pmd_cbdma.py
@@ -1,6 +1,31 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2022 Intel Corporation
#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
DPDK Test suite.
@@ -20,7 +45,7 @@ from framework.test_case import TestCase
from framework.virt_common import VM
-class TestVM2VMVirtioPmdCbdma(TestCase):
+class TestVM2VMVirtioPmdCBDMA(TestCase):
def set_up_all(self):
self.dut_ports = self.dut.get_ports()
self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
@@ -48,6 +73,7 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
self.result_table_create(self.table_header)
self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
+ self.dut.send_expect("killall -s INT perf", "#")
self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
self.vm_num = 2
self.vm_dut = []
@@ -91,33 +117,6 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
60,
)
- @staticmethod
- def generate_dms_param(queues):
- das_list = []
- for i in range(queues):
- das_list.append("txq{}".format(i))
- das_param = "[{}]".format(";".join(das_list))
- return das_param
-
- @staticmethod
- def generate_lcore_dma_param(cbdma_list, core_list):
- group_num = int(len(cbdma_list) / len(core_list))
- lcore_dma_list = []
- if len(cbdma_list) == 1:
- for core in core_list:
- lcore_dma_list.append("lcore{}@{}".format(core, cbdma_list[0]))
- elif len(core_list) == 1:
- for cbdma in cbdma_list:
- lcore_dma_list.append("lcore{}@{}".format(core_list[0], cbdma))
- else:
- for cbdma in cbdma_list:
- core_list_index = int(cbdma_list.index(cbdma) / group_num)
- lcore_dma_list.append(
- "lcore{}@{}".format(core_list[core_list_index], cbdma)
- )
- lcore_dma_param = "[{}]".format(",".join(lcore_dma_list))
- return lcore_dma_param
-
def start_vhost_testpmd(self, cores, ports, prefix, eal_param, param):
"""
launch the testpmd with different parameters
@@ -130,6 +129,7 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
def start_vms(
self,
vm_queue,
+ mergeable=True,
packed=False,
server_mode=True,
restart_vm1=False,
@@ -152,14 +152,22 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
else:
vm_params["opt_path"] = self.base_dir + "/vhost-net%d" % i + ",server"
vm_params["opt_mac"] = "52:54:00:00:00:0%d" % (i + 1)
- if not packed:
+ if mergeable:
+ mrg_rxbuf = "on"
+ else:
+ mrg_rxbuf = "off"
+ if packed:
vm_params[
"opt_settings"
- ] = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on"
+ ] = "disable-modern=false,mrg_rxbuf={},mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on".format(
+ mrg_rxbuf
+ )
else:
vm_params[
"opt_settings"
- ] = "disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on"
+ ] = "disable-modern=false,mrg_rxbuf={},mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on".format(
+ mrg_rxbuf
+ )
vm_info.set_vm_device(**vm_params)
time.sleep(3)
try:
@@ -172,32 +180,42 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
self.vm_dut.append(vm_dut)
self.vm.append(vm_info)
- def start_vm0_testpmd(self):
- param = "--tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000"
- self.vm0_pmd.start_testpmd(cores="default", param=param)
- self.vm0_pmd.execute_cmd("set fwd mac")
- self.vm0_pmd.execute_cmd("start")
-
- def start_vm1_testpmd(self, resend=False):
- param = "--tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000"
- if not resend:
- self.vm1_pmd.start_testpmd(cores="default", param=param)
- self.vm1_pmd.execute_cmd("set fwd mac")
- self.vm1_pmd.execute_cmd(
- "set txpkts 64,256,512,1024,2000,64,256,512,1024,2000"
+ def start_vm_testpmd(self, vm_pmd, queues, mergeable=True):
+ if mergeable:
+ param = "--enable-hw-vlan-strip --txq={} --rxq={} --txd=1024 --rxd=1024 --max-pkt-len=9600 --tx-offloads=0x00 --rx-offloads=0x00002000".format(
+ queues, queues
)
- self.vm1_pmd.execute_cmd("start tx_first 1")
else:
- self.vm1_pmd.execute_cmd("stop")
- self.vm0_pmd.execute_cmd("start")
- self.vm0_pmd.execute_cmd("clear port stats all")
- self.vhost_user_pmd.execute_cmd("clear port stats all")
- self.vm1_pmd.execute_cmd("clear port stats all")
- self.vm1_pmd.execute_cmd("start tx_first 1")
+ param = "--enable-hw-vlan-strip --txq={} --rxq={} --txd=1024 --rxd=1024 --tx-offloads=0x00".format(
+ queues, queues
+ )
+ vm_pmd.start_testpmd(cores="default", param=param)
+ vm_pmd.execute_cmd("set fwd mac")
+
+ def send_big_imix_packets_from_vm1(self):
+ self.vm1_pmd.execute_cmd("set txpkts 64,256,512,1024,2000,64,256,512,1024,2000")
+ self.vm1_pmd.execute_cmd("start tx_first 32")
+ self.vm1_pmd.execute_cmd("show port stats all")
+
+ def send_small_imix_packets_from_vm1(self):
+ self.vm1_pmd.execute_cmd("set txpkts 64,256,512")
+ self.vm1_pmd.execute_cmd("start tx_first 32")
+ self.vm1_pmd.execute_cmd("show port stats all")
+
+ def send_small_imix_packets_from_vm1(self):
+ self.vm1_pmd.execute_cmd("set txpkts 64,256,512")
+ self.vm1_pmd.execute_cmd("start tx_first 32")
+ self.vm1_pmd.execute_cmd("show port stats all")
+
+ def send_64b_packets_from_vm1(self):
+ self.vm1_pmd.execute_cmd("stop")
+ self.vm1_pmd.execute_cmd("start tx_first 32")
+ self.vm1_pmd.execute_cmd("show port stats all")
def check_packets_of_each_queue(self, vm_pmd, queues):
vm_pmd.execute_cmd("show port stats all")
out = vm_pmd.execute_cmd("stop")
+ self.logger.info(out)
for queue in range(queues):
reg = "Queue= %d" % queue
index = out.find(reg)
@@ -211,28 +229,94 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
+ "rx-packets: {}, tx-packets: {}".format(rx_packets, tx_packets),
)
- def test_vm2vm_virtio_pmd_split_ring_mergeable_path_8_queues_cbdma_enable_with_server_mode_stable_test(
+ def dynamic_change_queue_size(self, dut_pmd, queues):
+ dut_pmd.execute_cmd("stop")
+ dut_pmd.execute_cmd("port stop all")
+ dut_pmd.execute_cmd("port config all rxq {}".format(queues))
+ dut_pmd.execute_cmd("port config all txq {}".format(queues))
+ dut_pmd.execute_cmd("port start all")
+ dut_pmd.execute_cmd("start")
+
+ def get_and_verify_func_name_of_perf_top(self, func_name_list):
+ self.dut.send_expect("rm -fr perf_top.log", "# ", 120)
+ self.dut.send_expect("perf top > perf_top.log", "", 120)
+ time.sleep(10)
+ self.dut.send_expect("^C", "#")
+ out = self.dut.send_expect("cat perf_top.log", "# ", 120)
+ self.logger.info(out)
+ for func_name in func_name_list:
+ self.verify(
+ func_name in out,
+ "the func_name {} is not in the perf top output".format(func_name),
+ )
+
+ def test_vm2vm_virtio_pmd_split_ring_mergeable_path_dynamic_queue_size_with_cbdma_enable_and_server_mode(
self,
):
"""
- Test Case 1: VM2VM virtio-pmd split ring mergeable path 8 queues CBDMA enable with server mode stable test
+ Test Case 1: VM2VM virtio-pmd split ring mergeable path dynamic queue size with cbdma enable and server mode
"""
+ self.check_path = ["virtio_dev_rx_async", "virtio_dev_tx_async"]
self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
- dmas = self.generate_dms_param(8)
- lcore_dma = self.generate_lcore_dma_param(
- cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:]
+ lcore_dma = (
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s"
+ % (
+ self.vhost_core_list[1],
+ self.cbdma_list[0],
+ self.vhost_core_list[1],
+ self.cbdma_list[1],
+ self.vhost_core_list[1],
+ self.cbdma_list[2],
+ self.vhost_core_list[1],
+ self.cbdma_list[3],
+ self.vhost_core_list[2],
+ self.cbdma_list[4],
+ self.vhost_core_list[2],
+ self.cbdma_list[5],
+ self.vhost_core_list[2],
+ self.cbdma_list[6],
+ self.vhost_core_list[2],
+ self.cbdma_list[7],
+ self.vhost_core_list[3],
+ self.cbdma_list[8],
+ self.vhost_core_list[3],
+ self.cbdma_list[9],
+ self.vhost_core_list[3],
+ self.cbdma_list[10],
+ self.vhost_core_list[3],
+ self.cbdma_list[11],
+ self.vhost_core_list[4],
+ self.cbdma_list[12],
+ self.vhost_core_list[4],
+ self.cbdma_list[13],
+ self.vhost_core_list[4],
+ self.cbdma_list[14],
+ self.vhost_core_list[4],
+ self.cbdma_list[15],
+ )
)
eal_param = (
- "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas={}'".format(
- dmas
- )
- + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas={}'".format(
- dmas
- )
+ "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;rxq0;rxq1;rxq2;rxq3]'"
+ + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;rxq0;rxq1;rxq2;rxq3]'"
)
param = (
- "--nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
- + " --lcore-dma={}".format(lcore_dma)
+ "--nb-cores=4 --txd=1024 --rxd=1024 --rxq=4 --txq=4"
+ + " --lcore-dma=[%s]" % lcore_dma
)
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -241,16 +325,35 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
eal_param=eal_param,
param=param,
)
- self.start_vms(vm_queue=8, packed=False, server_mode=True)
+ self.start_vms(vm_queue=8, mergeable=True, packed=False, server_mode=True)
self.vm0_pmd = PmdOutput(self.vm_dut[0])
self.vm1_pmd = PmdOutput(self.vm_dut[1])
- self.start_vm0_testpmd()
- self.start_vm1_testpmd(resend=False)
+ self.start_vm_testpmd(vm_pmd=self.vm0_pmd, queues=8, mergeable=True)
+ self.vm0_pmd.execute_cmd("start")
+ self.start_vm_testpmd(vm_pmd=self.vm1_pmd, queues=8, mergeable=True)
+ self.send_big_imix_packets_from_vm1()
+ self.get_and_verify_func_name_of_perf_top(self.check_path)
+ self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=4)
+ self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=4)
+
+ self.dynamic_change_queue_size(dut_pmd=self.vhost_user_pmd, queues=8)
+ self.vm0_pmd.execute_cmd("start")
+ self.send_64b_packets_from_vm1()
+ self.get_and_verify_func_name_of_perf_top(self.check_path)
self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=8)
self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=8)
- for _ in range(10):
+
+ for _ in range(5):
self.logger.info("Quit and relaunch vhost side testpmd")
- self.vhost_user_pmd.execute_cmd("quit", "#")
+ self.vhost_user_pmd.quit()
+ eal_param = (
+ "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'"
+ + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'"
+ )
+ param = (
+ "--nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
+ + " --lcore-dma=[%s]" % lcore_dma
+ )
self.start_vhost_testpmd(
cores=self.vhost_core_list,
ports=self.cbdma_list,
@@ -258,32 +361,78 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
eal_param=eal_param,
param=param,
)
- self.start_vm1_testpmd(resend=True)
+ self.vm0_pmd.execute_cmd("start")
+ self.send_64b_packets_from_vm1()
self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=8)
self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=8)
- def test_vm2vm_virtio_pmd_split_ring_mergeable_path_dynamic_queue_size_cbdma_enable_with_server_mode_test(
+ def test_vm2vm_virtio_pmd_split_ring_non_mergeable_path_dynamic_queue_size_with_cbdma_enable_and_server_mode(
self,
):
"""
- Test Case 2: VM2VM virtio-pmd split ring mergeable path dynamic queue size CBDMA enable with server mode test
+ Test Case 2: VM2VM virtio-pmd split ring non-mergeable path dynamic queue size with cbdma enable and server mode
"""
+ self.check_path = ["virtio_dev_rx_async", "virtio_dev_tx_async"]
self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
- dmas = self.generate_dms_param(4)
- lcore_dma = self.generate_lcore_dma_param(
- cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:]
+ lcore_dma = (
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s"
+ % (
+ self.vhost_core_list[1],
+ self.cbdma_list[0],
+ self.vhost_core_list[1],
+ self.cbdma_list[1],
+ self.vhost_core_list[1],
+ self.cbdma_list[2],
+ self.vhost_core_list[1],
+ self.cbdma_list[3],
+ self.vhost_core_list[2],
+ self.cbdma_list[4],
+ self.vhost_core_list[2],
+ self.cbdma_list[5],
+ self.vhost_core_list[2],
+ self.cbdma_list[6],
+ self.vhost_core_list[2],
+ self.cbdma_list[7],
+ self.vhost_core_list[3],
+ self.cbdma_list[8],
+ self.vhost_core_list[3],
+ self.cbdma_list[9],
+ self.vhost_core_list[3],
+ self.cbdma_list[10],
+ self.vhost_core_list[3],
+ self.cbdma_list[11],
+ self.vhost_core_list[4],
+ self.cbdma_list[12],
+ self.vhost_core_list[4],
+ self.cbdma_list[13],
+ self.vhost_core_list[4],
+ self.cbdma_list[14],
+ self.vhost_core_list[4],
+ self.cbdma_list[15],
+ )
)
eal_param = (
- "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas={}'".format(
- dmas
- )
- + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas={}'".format(
- dmas
- )
+ "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;rxq0;rxq1;rxq2;rxq3]'"
+ + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;rxq0;rxq1;rxq2;rxq3]'"
)
param = (
- " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=4 --txq=4"
- + " --lcore-dma={}".format(lcore_dma)
+ "--nb-cores=4 --txd=1024 --rxd=1024 --rxq=4 --txq=4"
+ + " --lcore-dma=[%s]" % lcore_dma
)
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -292,54 +441,91 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
eal_param=eal_param,
param=param,
)
- self.start_vms(vm_queue=8, packed=False, server_mode=True)
+ self.start_vms(vm_queue=8, mergeable=False, packed=False, server_mode=True)
self.vm0_pmd = PmdOutput(self.vm_dut[0])
self.vm1_pmd = PmdOutput(self.vm_dut[1])
- self.start_vm0_testpmd()
- self.start_vm1_testpmd(resend=False)
+ self.start_vm_testpmd(vm_pmd=self.vm0_pmd, queues=8, mergeable=False)
+ self.vm0_pmd.execute_cmd("start")
+ self.start_vm_testpmd(vm_pmd=self.vm1_pmd, queues=8, mergeable=False)
+ self.send_small_imix_packets_from_vm1()
+ self.get_and_verify_func_name_of_perf_top(self.check_path)
+ self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=4)
+ self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=4)
+
+ self.dynamic_change_queue_size(dut_pmd=self.vm0_pmd, queues=4)
+ self.dynamic_change_queue_size(dut_pmd=self.vm1_pmd, queues=4)
+ self.send_64b_packets_from_vm1()
+ self.get_and_verify_func_name_of_perf_top(self.check_path)
self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=4)
self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=4)
- for _ in range(10):
- self.logger.info("Quit and relaunch vhost side testpmd with 8 queues")
- self.vhost_user_pmd.execute_cmd("quit", "#")
- dmas = self.generate_dms_param(8)
- eal_param = "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas={}'".format(
- dmas
- ) + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas={}'".format(
- dmas
- )
- param = (
- " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
- + " --lcore-dma={}".format(lcore_dma)
- )
- self.start_vhost_testpmd(
- cores=self.vhost_core_list,
- ports=self.cbdma_list,
- prefix="vhost",
- eal_param=eal_param,
- param=param,
- )
- self.start_vm1_testpmd(resend=True)
- self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=8)
- self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=8)
- def test_vm2vm_virtio_pmd_packed_ring_mergeable_path_8_queues_cbdma_enable_test(
+ def test_vm2vm_virtio_pmd_packed_ring_mergeable_path_dynamic_queue_size_with_cbdma_enable_and_server_mode(
self,
):
"""
- Test Case 3: VM2VM virtio-pmd packed ring mergeable path 8 queues CBDMA enable test
+ Test Case 3: VM2VM virtio-pmd packed ring mergeable path dynamic queue size with cbdma enable and server mode
"""
+ self.check_path = ["virtio_dev_rx_async"]
self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
- dmas = self.generate_dms_param(8)
- lcore_dma = self.generate_lcore_dma_param(
- cbdma_list=self.cbdma_list, core_list=self.vhost_core_list[1:]
+ lcore_dma = (
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s"
+ % (
+ self.vhost_core_list[1],
+ self.cbdma_list[0],
+ self.vhost_core_list[1],
+ self.cbdma_list[1],
+ self.vhost_core_list[1],
+ self.cbdma_list[2],
+ self.vhost_core_list[1],
+ self.cbdma_list[3],
+ self.vhost_core_list[2],
+ self.cbdma_list[4],
+ self.vhost_core_list[2],
+ self.cbdma_list[5],
+ self.vhost_core_list[2],
+ self.cbdma_list[6],
+ self.vhost_core_list[2],
+ self.cbdma_list[7],
+ self.vhost_core_list[3],
+ self.cbdma_list[8],
+ self.vhost_core_list[3],
+ self.cbdma_list[9],
+ self.vhost_core_list[3],
+ self.cbdma_list[10],
+ self.vhost_core_list[3],
+ self.cbdma_list[11],
+ self.vhost_core_list[4],
+ self.cbdma_list[12],
+ self.vhost_core_list[4],
+ self.cbdma_list[13],
+ self.vhost_core_list[4],
+ self.cbdma_list[14],
+ self.vhost_core_list[4],
+ self.cbdma_list[15],
+ )
+ )
+ eal_param = (
+ "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7]'"
+ + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7]'"
)
- eal_param = "--vdev 'net_vhost0,iface=vhost-net0,queues=8,dmas={}'".format(
- dmas
- ) + " --vdev 'net_vhost1,iface=vhost-net1,queues=8,dmas={}'".format(dmas)
param = (
- " --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
- + " --lcore-dma={}".format(lcore_dma)
+ "--nb-cores=4 --txd=1024 --rxd=1024 --rxq=4 --txq=4"
+ + " --lcore-dma=[%s]" % lcore_dma
)
self.start_vhost_testpmd(
cores=self.vhost_core_list,
@@ -348,22 +534,130 @@ class TestVM2VMVirtioPmdCbdma(TestCase):
eal_param=eal_param,
param=param,
)
- self.start_vms(vm_queue=8, packed=True, server_mode=False)
+ self.start_vms(vm_queue=8, mergeable=True, packed=True, server_mode=True)
self.vm0_pmd = PmdOutput(self.vm_dut[0])
self.vm1_pmd = PmdOutput(self.vm_dut[1])
- self.start_vm0_testpmd()
- self.start_vm1_testpmd(resend=False)
- self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=8)
- self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=8)
+ self.start_vm_testpmd(vm_pmd=self.vm0_pmd, queues=8, mergeable=True)
+ self.vm0_pmd.execute_cmd("start")
+ self.start_vm_testpmd(vm_pmd=self.vm1_pmd, queues=8, mergeable=True)
+ self.send_big_imix_packets_from_vm1()
+ self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=4)
+ self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=4)
+
self.logger.info("Quit and relaunch VM2 with split ring")
self.vm1_pmd.execute_cmd("quit", "#")
self.vm[1].stop()
self.vm_dut.remove(self.vm_dut[1])
self.vm.remove(self.vm[1])
- self.start_vms(vm_queue=8, packed=False, restart_vm1=True, server_mode=False)
+ self.start_vms(
+ vm_queue=8, mergeable=True, packed=False, restart_vm1=True, server_mode=True
+ )
+ self.vm1_pmd = PmdOutput(self.vm_dut[1])
+ self.vm0_pmd.execute_cmd("start")
+ self.start_vm_testpmd(vm_pmd=self.vm1_pmd, queues=8)
+ self.send_big_imix_packets_from_vm1()
+ self.get_and_verify_func_name_of_perf_top(self.check_path)
+ self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=4)
+ self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=4)
+
+ self.dynamic_change_queue_size(dut_pmd=self.vhost_user_pmd, queues=8)
+ self.vm0_pmd.execute_cmd("start")
+ self.send_64b_packets_from_vm1()
+ self.get_and_verify_func_name_of_perf_top(self.check_path)
+ self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=8)
+ self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=8)
+
+ def test_vm2vm_virtio_pmd_packed_ring_non_mergeable_path_dynamic_queue_size_with_cbdma_enable_and_server_mode(
+ self,
+ ):
+ """
+ Test Case 4: VM2VM virtio-pmd packed ring non-mergeable path dynamic queue size with cbdma enable and server mode
+ """
+ self.check_path = ["virtio_dev_rx_async"]
+ self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
+ lcore_dma = (
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s,"
+ "lcore%s@%s"
+ % (
+ self.vhost_core_list[1],
+ self.cbdma_list[0],
+ self.vhost_core_list[1],
+ self.cbdma_list[1],
+ self.vhost_core_list[1],
+ self.cbdma_list[2],
+ self.vhost_core_list[1],
+ self.cbdma_list[3],
+ self.vhost_core_list[2],
+ self.cbdma_list[4],
+ self.vhost_core_list[2],
+ self.cbdma_list[5],
+ self.vhost_core_list[2],
+ self.cbdma_list[6],
+ self.vhost_core_list[2],
+ self.cbdma_list[7],
+ self.vhost_core_list[3],
+ self.cbdma_list[8],
+ self.vhost_core_list[3],
+ self.cbdma_list[9],
+ self.vhost_core_list[3],
+ self.cbdma_list[10],
+ self.vhost_core_list[3],
+ self.cbdma_list[11],
+ self.vhost_core_list[4],
+ self.cbdma_list[12],
+ self.vhost_core_list[4],
+ self.cbdma_list[13],
+ self.vhost_core_list[4],
+ self.cbdma_list[14],
+ self.vhost_core_list[4],
+ self.cbdma_list[15],
+ )
+ )
+ eal_param = (
+ "--vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq5;txq6;txq7]'"
+ + " --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq5;txq6;txq7]'"
+ )
+ param = (
+ "--nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8"
+ + " --lcore-dma=[%s]" % lcore_dma
+ )
+ self.start_vhost_testpmd(
+ cores=self.vhost_core_list,
+ ports=self.cbdma_list,
+ prefix="vhost",
+ eal_param=eal_param,
+ param=param,
+ )
+ self.start_vms(vm_queue=8, mergeable=False, packed=True, server_mode=True)
+ self.vm0_pmd = PmdOutput(self.vm_dut[0])
self.vm1_pmd = PmdOutput(self.vm_dut[1])
+ self.start_vm_testpmd(vm_pmd=self.vm0_pmd, queues=4, mergeable=False)
+ self.vm0_pmd.execute_cmd("start")
+ self.start_vm_testpmd(vm_pmd=self.vm1_pmd, queues=4, mergeable=False)
+ self.send_small_imix_packets_from_vm1()
+ self.get_and_verify_func_name_of_perf_top(self.check_path)
+ self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=4)
+ self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=4)
+
+ self.dynamic_change_queue_size(self.vm0_pmd, queues=8)
+ self.dynamic_change_queue_size(self.vm1_pmd, queues=8)
self.vm0_pmd.execute_cmd("start")
- self.start_vm1_testpmd(resend=False)
+ self.send_64b_packets_from_vm1()
+ self.get_and_verify_func_name_of_perf_top(self.check_path)
self.check_packets_of_each_queue(vm_pmd=self.vm0_pmd, queues=8)
self.check_packets_of_each_queue(vm_pmd=self.vm1_pmd, queues=8)
--
2.25.1
next reply other threads:[~2022-07-29 7:32 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-07-29 7:27 Wei Ling [this message]
2022-08-01 2:30 ` Huang, ChenyuX
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220729072728.1007912-1-weix.ling@intel.com \
--to=weix.ling@intel.com \
--cc=dts@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).