* [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about cbdma
From: Yang Lewei @ 2021-07-13 15:50 UTC
To: dts; +Cc: Yang Lewei
1. Change cases to use imix packets for better coverage (see the illustrative sketch after this list):
- test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations
- test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations
2. Add one CBDMA performance case:
- test_perf_compare_pvp_split_ring_performance
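For reference, a minimal sketch of the kind of imix weighting involved
(the actual frame sizes and ratios live in the suite config and the
packet generator, so the values below are only illustrative):

    # classic 7:4:1 imix distribution -- illustrative, not necessarily
    # the exact mix DTS configures
    IMIX = [(64, 7), (594, 4), (1518, 1)]

    def imix_avg_frame_size(mix=IMIX):
        # weighted average frame size, e.g. for converting pps to line rate
        total_weight = sum(weight for _, weight in mix)
        return sum(size * weight for size, weight in mix) / total_weight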
Signed-off-by: Yang Lewei <leweix.yang@intel.com>
---
tests/TestSuite_vhost_cbdma.py | 213 +++++++++++++++++++--------------
1 file changed, 123 insertions(+), 90 deletions(-)
diff --git a/tests/TestSuite_vhost_cbdma.py b/tests/TestSuite_vhost_cbdma.py
index 6bd5919b..16fef645 100644
--- a/tests/TestSuite_vhost_cbdma.py
+++ b/tests/TestSuite_vhost_cbdma.py
@@ -99,10 +99,6 @@ class TestVirTioVhostCbdma(TestCase):
self.test_parameters = self.get_suite_cfg()['test_parameters']
# traffic duration in seconds
self.test_duration = self.get_suite_cfg()['test_duration']
- # traffic packet length mode
- # 'fixed' or 'imix', default is 'fixed'
- suite_cfg = self.get_suite_cfg()
- self.pkt_length_mode = (suite_cfg or {}).get('pkt_length_mode') or 'fixed'
# initialize throughput attribution
# {'TestCase':{ 'Mode': {'$framesize':{"$nb_desc": 'throughput'}}}
self.throughput = {}
@@ -271,62 +267,49 @@ class TestVirTioVhostCbdma(TestCase):
"""
self.test_target = self.running_case
self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
- used_cbdma_num = 4
- queue = 2
+ used_cbdma_num = 8
+ queue = 8
txd_rxd = 1024
dmathr = 1024
nb_cores = 1
virtio_path = "/tmp/s0"
path_mode = 'mrg_rxbuf=1,in_order=1'
self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
- vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr}"
- eal_params = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d " % (nb_cores, txd_rxd, txd_rxd, queue, queue)
- dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params}"
+ eal_params = " --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d "
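+ # txq/rxq are left as %d placeholders so the same template can relaunch vhost with 8, then 4, then 8 queues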
+ dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue,queue)}"
virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1"
- vhost_dev = f"'net_vhost0,iface={virtio_path},queues={queue},client=1,%s'"
+ vhost_dev = f"'net_vhost0,iface={virtio_path},queues=%d,client=1,%s'"
# launch vhost testpmd
allow_pci = [self.dut.ports_info[0]['pci']]
for index in range(used_cbdma_num):
- if index < used_cbdma_num / 2:
- allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
- # queue 2 start virtio testpmd, check performance and RX/TX
- mode = "dynamic_queue2"
+ allow_pci.append(self.cbdma_dev_infos[index])
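+ # allow_pci[0] is the NIC port and the remaining entries are the CBDMA channels,
+ # so ports=[allow_pci[0]] below launches vhost with no DMA devices,
+ # allow_pci[:5] exposes four channels, and the full list exposes all eight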
+
+ # launch vhost without any CBDMA channel
+ self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports=[allow_pci[0]])
+ mode = "no_cbdma"
self.mode_list.append(mode)
self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
self.send_and_verify(mode, queue_list=range(queue))
- # On virtio-user side, dynamic change rx/tx queue numbers from 2 queue to 1 queues
- self.vhost_or_virtio_set_one_queue(self.virtio_user)
- self.send_and_verify("virtio_user_" + mode + "_change_to_1", queue_list=[0])
- self.mode_list.append("virtio_user_" + mode + "_change_to_1")
- self.virtio_user.send_expect("stop", "testpmd> ")
- self.virtio_user.send_expect("quit", "# ")
+ self.vhost_user.send_expect("quit", "#")
+
+ # launch vhost with 4 CBDMA channels and 4 queues
+
+ vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}],dmathr={dmathr}"
+ self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2], dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])
+ self.send_and_verify("used_4_cbdma_num", queue_list=range(int(queue/2)))
+ self.mode_list.append("used_4_cbdma_num")
+ self.vhost_user.send_expect("quit", "#")
+
+ # launch vhost with 8 CBDMA channels and 8 queues
+ vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}],dmathr={dmathr}"
+ self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],
+ dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)
+ self.send_and_verify("used_8_cbdma_num", queue_list=range(queue))
+ self.mode_list.append("used_8_cbdma_num")
+ self.send_and_verify("used_8_cbdma_num_1", queue_list=range(queue))
+ self.mode_list.append("used_8_cbdma_num_1")
+ self.virtio_user.send_expect("stop", "testpmd> ", 60)
time.sleep(5)
- self.dut.send_expect(f"rm -rf {virtio_path}", "#")
- # queue 2 start virtio testpmd, check performance and RX/TX
- self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
- mode = "Relaunch_dynamic_queue2"
- self.mode_list.append(mode)
- self.send_and_verify(mode, queue_list=range(queue))
- # On vhost side, dynamic change rx queue numbers from 2 queue to 1 queues
- self.vhost_or_virtio_set_one_queue(self.vhost_user)
- self.send_and_verify("vhost_user" + mode + "_change_to_1")
- self.mode_list.append("vhost_user" + mode + "_change_to_1")
- self.vhost_user.send_expect("quit", "# ")
- time.sleep(2)
- # Relaunch vhost with another two cbdma channels
- mode = "Relaunch_vhost_2_cbdma"
- self.mode_list.append(mode)
- dmathr = 512
- vhost_dmas = f"dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr}"
- allow_pci = [self.dut.ports_info[0]['pci']]
- for index in range(used_cbdma_num):
- if index >= used_cbdma_num / 2:
- allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
- self.virtio_user.send_expect("clear port stats all", "testpmd> ", 30)
- self.send_and_verify(mode, queue_list=range(queue))
- self.check_port_stats_result(self.virtio_user)
self.virtio_user.send_expect("quit", "# ")
self.vhost_user.send_expect("quit", "# ")
self.result_table_print()
@@ -427,8 +410,8 @@ class TestVirTioVhostCbdma(TestCase):
"""
self.test_target = self.running_case
self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
- used_cbdma_num = 4
- queue = 2
+ used_cbdma_num = 8
+ queue = 8
txd_rxd = 1024
dmathr = 1024
nb_cores = 1
@@ -436,53 +419,41 @@ class TestVirTioVhostCbdma(TestCase):
path_mode = 'mrg_rxbuf=1,in_order=1,packed_vq=1'
self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr}"
- eal_params = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d " % (nb_cores, txd_rxd, txd_rxd, queue, queue)
- dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params}"
+ eal_params = " --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d "
+ dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue, queue)}"
virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1"
- vhost_dev = f"'net_vhost0,iface={virtio_path},queues={queue},client=1,%s'"
+ vhost_dev = f"'net_vhost0,iface={virtio_path},queues=%s,client=1,%s'"
# launch vhost testpmd
allow_pci = [self.dut.ports_info[0]['pci']]
for index in range(used_cbdma_num):
- if index < used_cbdma_num / 2:
- allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
- # queue 2 start virtio testpmd, check performance and RX/TX
- mode = "dynamic_queue2"
+ allow_pci.append(self.cbdma_dev_infos[index])
+
+ # launch vhost without any CBDMA channel
+ self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports= [allow_pci[0]])
+ mode = "no_cbdma"
self.mode_list.append(mode)
self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
self.send_and_verify(mode, queue_list=range(queue))
- # On virtio-user side, dynamic change rx/tx queue numbers from 2 queue to 1 queues
- self.vhost_or_virtio_set_one_queue(self.virtio_user)
- self.send_and_verify("virtio_user_" + mode + "_change_to_1", queue_list=[0])
- self.mode_list.append("virtio_user_" + mode + "_change_to_1")
- self.virtio_user.send_expect("stop", "testpmd> ")
- self.virtio_user.send_expect("quit", "# ")
+ self.vhost_user.send_expect("quit", "#")
+
+ # launch vhost with 4 CBDMA channels and 4 queues
+ vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}],dmathr={dmathr}"
+ self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2],
+ dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])
+ self.send_and_verify("used_4_cbdma_num", queue_list=range(int(queue/2)))
+ self.mode_list.append("used_4_cbdma_num")
+ self.vhost_user.send_expect("quit", "#")
+
+ # launch vhost with 8 CBDMA channels and 8 queues
+ vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}],dmathr={dmathr}"
+ self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],
+ dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)
+ self.send_and_verify("used_8_cbdma_num", queue_list=range(queue))
+ self.mode_list.append("used_8_cbdma_num")
+ self.send_and_verify("used_8_cbdma_num_1", queue_list=range(queue))
+ self.mode_list.append("used_8_cbdma_num_1")
+ self.virtio_user.send_expect("stop", "testpmd> ", 60)
time.sleep(5)
- self.dut.send_expect(f"rm -rf {virtio_path}", "#")
- # queue 2 start virtio testpmd, check performance and RX/TX
- self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
- mode = "Relaunch_dynamic_queue2"
- self.mode_list.append(mode)
- self.send_and_verify(mode, queue_list=range(queue))
- # On vhost side, dynamic change rx queue numbers from 2 queue to 1 queues
- self.vhost_or_virtio_set_one_queue(self.vhost_user)
- self.send_and_verify("vhost_user" + mode + "_change_to_1")
- self.mode_list.append("vhost_user" + mode + "_change_to_1")
- self.vhost_user.send_expect("quit", "# ")
- time.sleep(2)
- # Relaunch vhost with another two cbdma channels
- mode = "Relaunch_vhost_2_cbdma"
- self.mode_list.append(mode)
- dmathr = 512
- vhost_dmas = f"dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr}"
- allow_pci = [self.dut.ports_info[0]['pci']]
- for index in range(used_cbdma_num):
- if index >= used_cbdma_num / 2:
- allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
- self.virtio_user.send_expect("clear port stats all", "testpmd> ", 30)
- self.send_and_verify(mode, queue_list=range(queue))
- self.check_port_stats_result(self.virtio_user)
self.virtio_user.send_expect("quit", "# ")
self.vhost_user.send_expect("quit", "# ")
self.result_table_print()
@@ -491,6 +462,68 @@ class TestVirTioVhostCbdma(TestCase):
self.handle_expected(mode_list=self.mode_list)
self.handle_results(mode_list=self.mode_list)
+
+
+ def test_perf_compare_pvp_split_ring_performance(self):
+ """
+ Test Case 6: Compare PVP split ring performance between CPU copy, CBDMA copy and Sync copy
+ """
+ used_cbdma_num = 1
+ queue = 1
+ txd_rxd = 1024
+ eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
+ path_mode = 'mrg_rxbuf=1,in_order=1,server=1'
+ allow_pci = [self.dut.ports_info[0]['pci']]
+ self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+ for index in range(used_cbdma_num):
+ allow_pci.append(self.cbdma_dev_infos[index])
+ path_mode = 'mrg_rxbuf=1,in_order=1'
+ vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,client=1,dmas=[txq0@{self.device_str}],%s'"
+ compare_pvp_split_ring_performance = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
+ dev_path_mode_mapper = {
+ "sync_cbdma": ['dmathr=1024', 'dmathr=2000'],
+ "cpu": 'dmathr=0',
+ }
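+ # dmathr is the DMA copy threshold devarg: frames at or above the threshold
+ # are presumably copied by the CBDMA engine while smaller frames take the
+ # sync CPU path, so dmathr=1024 gives CBDMA copies for 1518B frames,
+ # dmathr=2000 keeps 1518B frames on the sync path, and the "cpu" case
+ # drops dmas=[] entirely as a pure software-copy baseline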
+ for key,dma_mode in dev_path_mode_mapper.items():
+ if key == "cpu":
+ vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=1'"
+ self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs, ports=[allow_pci[0]])
+ vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
+ self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4], dev=vdevs)
+ mode = "cpu_copy_64"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode, frame_sizes=[64], pkt_length_mode='fixed')
+ perf_cpu_copy_64 = self.throughput[mode][64][self.nb_desc]
+ self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)
+ self.virtio_user.send_expect("quit", "# ")
+ self.vhost_user.send_expect("quit", "# ")
+ else:
+ self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue, dma_mode[0]), ports=allow_pci)
+ vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d,server=1'" % queue
+ self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4],dev=vdevs)
+ mode = "sync_copy_64"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode,frame_sizes=[64],pkt_length_mode='fixed')
+ perf_sync_copy_64 = self.throughput[mode][64][self.nb_desc]
+ mode = "cbdma_copy_1518"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode='fixed')
+ perf_cbdma_copy_1518 = self.throughput[mode][1518][self.nb_desc]
+ self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)
+ self.vhost_user.send_expect("quit", "# ")
+ time.sleep(3)
+ self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue, dma_mode[1]), ports=allow_pci)
+ mode = "sync_copy_1518"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode='fixed')
+ perf_sync_copy_1518 = self.throughput[mode][1518][self.nb_desc]
+ self.check_port_stats_result(self.virtio_user)
+ self.virtio_user.send_expect("quit", "# ")
+ self.vhost_user.send_expect("quit", "# ")
+ self.result_table_print()
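+ # acceptance criteria: 64B frames fall below both DMA thresholds, so sync
+ # copy and pure CPU copy should agree within 10%, while for 1518B frames
+ # the CBDMA copy is expected to differ from sync copy by more than 5%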
+ self.verify(abs(perf_sync_copy_64 - perf_cpu_copy_64)/perf_sync_copy_64 < 0.1, "sync_copy_64 vs. cpu_copy_64 delta > 10%" )
+ self.verify(abs(perf_cbdma_copy_1518 - perf_sync_copy_1518)/perf_sync_copy_1518 > 0.05,"cbdma_copy_1518 vs sync_copy_1518 delta < 5%")
+
@staticmethod
def vhost_or_virtio_set_one_queue(session):
session.send_expect('stop', 'testpmd> ', 120)
@@ -556,16 +589,16 @@ class TestVirTioVhostCbdma(TestCase):
# check RX/TX can work normally in each queue
self.check_packets_of_each_queue(queue_list=queue_list)
- def send_and_verify(self, mode, multiple_queue=True, queue_list=[]):
+ def send_and_verify(self, mode, multiple_queue=True, queue_list=[], frame_sizes=None, pkt_length_mode='imix'):
"""
Send packet with packet generator and verify
"""
- if self.pkt_length_mode == 'imix':
+ if pkt_length_mode == 'imix':
self.send_imix_and_verify(mode, multiple_queue, queue_list)
return
self.throughput[mode] = dict()
- for frame_size in self.frame_sizes:
+ for frame_size in frame_sizes:
self.throughput[mode][frame_size] = dict()
payload_size = frame_size - self.headers_size
tgenInput = []
--
2.17.1
* Re: [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about cbdma
From: Wang, Yinan @ 2021-07-22 23:19 UTC
To: Yang, LeweiX, dts; +Cc: Yang, LeweiX
Acked-by: Yinan Wang <yinan.wang@intel.com>
> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Yang Lewei
> Sent: 2021年7月13日 23:51
> To: dts@dpdk.org
> Cc: Yang, LeweiX <leweix.yang@intel.com>
> Subject: [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about
> cbdma
>
> 1. change cases using imix pkts for better coverage:
> - test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations
> - test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations
> 2. add one cbdma performance case:
> - test_perf_compare_pvp_split_ring_performance
>
> Signed-off-by: Yang Lewei <leweix.yang@intel.com>
* Re: [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about cbdma
From: Tu, Lijuan @ 2021-07-26 5:54 UTC
To: Wang, Yinan, Yang, LeweiX, dts; +Cc: Yang, LeweiX
> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Wang, Yinan
> Sent: 2021年7月23日 7:19
> To: Yang, LeweiX <leweix.yang@intel.com>; dts@dpdk.org
> Cc: Yang, LeweiX <leweix.yang@intel.com>
> Subject: Re: [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about
> cbdma
>
> Acked-by: Yinan Wang <yinan.wang@intel.com>
Applied
* [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about cbdma
From: Yang Lewei @ 2021-07-06 18:11 UTC
To: dts; +Cc: YangLewei
From: YangLewei <leweix.yang@intel.com>
1. change cases using imix pkts for better coverage:
- test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations
- test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations
2. add one cbdma performance case:
- test_perf_compare_pvp_split_ring_performance
Signed-off-by: YangLewei <leweix.yang@intel.com>
---
tests/TestSuite_vhost_cbdma.py | 212 +++++++++++++++++++--------------
1 file changed, 125 insertions(+), 87 deletions(-)
diff --git a/tests/TestSuite_vhost_cbdma.py b/tests/TestSuite_vhost_cbdma.py
index 6bd5919b..3523c4b3 100644
--- a/tests/TestSuite_vhost_cbdma.py
+++ b/tests/TestSuite_vhost_cbdma.py
@@ -183,11 +183,13 @@ class TestVirTioVhostCbdma(TestCase):
self.result_secondary = re.findall(r'TX-packets: (\w+)', out)
self.verify(int(self.result_first[0]) > 1 and int(self.result_secondary[0]) > 1, "forward packets not correctly")
+
@property
def check_2m_env(self):
out = self.dut.send_expect("cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# ")
return True if out == '2048' else False
+
def launch_testpmd_as_vhost_user(self, command, cores="Default", dev="", ports = ""):
self.pmdout_vhost_user.start_testpmd(cores=cores, param=command, vdevs=[dev], ports=ports, prefix="vhost")
self.vhost_user.send_expect('set fwd mac', 'testpmd> ', 120)
@@ -271,62 +273,49 @@ class TestVirTioVhostCbdma(TestCase):
"""
self.test_target = self.running_case
self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
- used_cbdma_num = 4
- queue = 2
+ used_cbdma_num = 8
+ queue = 8
txd_rxd = 1024
dmathr = 1024
nb_cores = 1
virtio_path = "/tmp/s0"
path_mode = 'mrg_rxbuf=1,in_order=1'
self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
- vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr}"
- eal_params = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d " % (nb_cores, txd_rxd, txd_rxd, queue, queue)
- dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params}"
+ eal_params = " --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d "
+ dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue,queue)}"
virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1"
- vhost_dev = f"'net_vhost0,iface={virtio_path},queues={queue},client=1,%s'"
+ vhost_dev = f"'net_vhost0,iface={virtio_path},queues=%d,client=1,%s'"
# launch vhost testpmd
allow_pci = [self.dut.ports_info[0]['pci']]
for index in range(used_cbdma_num):
- if index < used_cbdma_num / 2:
- allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
- # queue 2 start virtio testpmd, check performance and RX/TX
- mode = "dynamic_queue2"
+ allow_pci.append(self.cbdma_dev_infos[index])
+
+ # launch vhost without any CBDMA channel
+ self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports=[allow_pci[0]])
+ mode = "no_cbdma"
self.mode_list.append(mode)
self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
self.send_and_verify(mode, queue_list=range(queue))
- # On virtio-user side, dynamic change rx/tx queue numbers from 2 queue to 1 queues
- self.vhost_or_virtio_set_one_queue(self.virtio_user)
- self.send_and_verify("virtio_user_" + mode + "_change_to_1", queue_list=[0])
- self.mode_list.append("virtio_user_" + mode + "_change_to_1")
- self.virtio_user.send_expect("stop", "testpmd> ")
- self.virtio_user.send_expect("quit", "# ")
+ self.vhost_user.send_expect("quit", "#")
+
+ # launch vhost with 4 CBDMA channels and 4 queues
+
+ vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}],dmathr={dmathr}"
+ self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2], dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])
+ self.send_and_verify("used_4_cbdma_num", queue_list=range(int(queue/2)))
+ self.mode_list.append("used_4_cbdma_num")
+ self.vhost_user.send_expect("quit", "#")
+
+ # launch vhost with 8 CBDMA channels and 8 queues
+ vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}],dmathr={dmathr}"
+ self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],
+ dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)
+ self.send_and_verify("used_8_cbdma_num", queue_list=range(queue))
+ self.mode_list.append("used_8_cbdma_num")
+ self.send_and_verify("used_8_cbdma_num_1", queue_list=range(queue))
+ self.mode_list.append("used_8_cbdma_num_1")
+ self.virtio_user.send_expect("stop", "testpmd> ", 60)
time.sleep(5)
- self.dut.send_expect(f"rm -rf {virtio_path}", "#")
- # queue 2 start virtio testpmd, check performance and RX/TX
- self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
- mode = "Relaunch_dynamic_queue2"
- self.mode_list.append(mode)
- self.send_and_verify(mode, queue_list=range(queue))
- # On vhost side, dynamic change rx queue numbers from 2 queue to 1 queues
- self.vhost_or_virtio_set_one_queue(self.vhost_user)
- self.send_and_verify("vhost_user" + mode + "_change_to_1")
- self.mode_list.append("vhost_user" + mode + "_change_to_1")
- self.vhost_user.send_expect("quit", "# ")
- time.sleep(2)
- # Relaunch vhost with another two cbdma channels
- mode = "Relaunch_vhost_2_cbdma"
- self.mode_list.append(mode)
- dmathr = 512
- vhost_dmas = f"dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr}"
- allow_pci = [self.dut.ports_info[0]['pci']]
- for index in range(used_cbdma_num):
- if index >= used_cbdma_num / 2:
- allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
- self.virtio_user.send_expect("clear port stats all", "testpmd> ", 30)
- self.send_and_verify(mode, queue_list=range(queue))
- self.check_port_stats_result(self.virtio_user)
self.virtio_user.send_expect("quit", "# ")
self.vhost_user.send_expect("quit", "# ")
self.result_table_print()
@@ -427,8 +416,8 @@ class TestVirTioVhostCbdma(TestCase):
"""
self.test_target = self.running_case
self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
- used_cbdma_num = 4
- queue = 2
+ used_cbdma_num = 8
+ queue = 8
txd_rxd = 1024
dmathr = 1024
nb_cores = 1
@@ -436,53 +425,41 @@ class TestVirTioVhostCbdma(TestCase):
path_mode = 'mrg_rxbuf=1,in_order=1,packed_vq=1'
self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr}"
- eal_params = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d " % (nb_cores, txd_rxd, txd_rxd, queue, queue)
- dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params}"
+ eal_params = " --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d "
+ dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue, queue)}"
virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1"
- vhost_dev = f"'net_vhost0,iface={virtio_path},queues={queue},client=1,%s'"
+ vhost_dev = f"'net_vhost0,iface={virtio_path},queues=%s,client=1,%s'"
# launch vhost testpmd
allow_pci = [self.dut.ports_info[0]['pci']]
for index in range(used_cbdma_num):
- if index < used_cbdma_num / 2:
- allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
- # queue 2 start virtio testpmd, check performance and RX/TX
- mode = "dynamic_queue2"
+ allow_pci.append(self.cbdma_dev_infos[index])
+
+ # launch vhost without any CBDMA channel
+ self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports= [allow_pci[0]])
+ mode = "no_cbdma"
self.mode_list.append(mode)
self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
self.send_and_verify(mode, queue_list=range(queue))
- # On virtio-user side, dynamic change rx/tx queue numbers from 2 queue to 1 queues
- self.vhost_or_virtio_set_one_queue(self.virtio_user)
- self.send_and_verify("virtio_user_" + mode + "_change_to_1", queue_list=[0])
- self.mode_list.append("virtio_user_" + mode + "_change_to_1")
- self.virtio_user.send_expect("stop", "testpmd> ")
- self.virtio_user.send_expect("quit", "# ")
+ self.vhost_user.send_expect("quit", "#")
+
+ # launch vhost with 4 CBDMA channels and 4 queues
+ vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}],dmathr={dmathr}"
+ self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2],
+ dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])
+ self.send_and_verify("used_4_cbdma_num", queue_list=range(int(queue/2)))
+ self.mode_list.append("used_4_cbdma_num")
+ self.vhost_user.send_expect("quit", "#")
+
+ # launch vhost with 8 CBDMA channels and 8 queues
+ vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}],dmathr={dmathr}"
+ self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],
+ dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)
+ self.send_and_verify("used_8_cbdma_num", queue_list=range(queue))
+ self.mode_list.append("used_8_cbdma_num")
+ self.send_and_verify("used_8_cbdma_num_1", queue_list=range(queue))
+ self.mode_list.append("used_8_cbdma_num_1")
+ self.virtio_user.send_expect("stop", "testpmd> ", 60)
time.sleep(5)
- self.dut.send_expect(f"rm -rf {virtio_path}", "#")
- # queue 2 start virtio testpmd, check performance and RX/TX
- self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
- mode = "Relaunch_dynamic_queue2"
- self.mode_list.append(mode)
- self.send_and_verify(mode, queue_list=range(queue))
- # On vhost side, dynamic change rx queue numbers from 2 queue to 1 queues
- self.vhost_or_virtio_set_one_queue(self.vhost_user)
- self.send_and_verify("vhost_user" + mode + "_change_to_1")
- self.mode_list.append("vhost_user" + mode + "_change_to_1")
- self.vhost_user.send_expect("quit", "# ")
- time.sleep(2)
- # Relaunch vhost with another two cbdma channels
- mode = "Relaunch_vhost_2_cbdma"
- self.mode_list.append(mode)
- dmathr = 512
- vhost_dmas = f"dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr}"
- allow_pci = [self.dut.ports_info[0]['pci']]
- for index in range(used_cbdma_num):
- if index >= used_cbdma_num / 2:
- allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
- self.virtio_user.send_expect("clear port stats all", "testpmd> ", 30)
- self.send_and_verify(mode, queue_list=range(queue))
- self.check_port_stats_result(self.virtio_user)
self.virtio_user.send_expect("quit", "# ")
self.vhost_user.send_expect("quit", "# ")
self.result_table_print()
@@ -491,6 +468,67 @@ class TestVirTioVhostCbdma(TestCase):
self.handle_expected(mode_list=self.mode_list)
self.handle_results(mode_list=self.mode_list)
+
+
+ def test_perf_compare_pvp_split_ring_performance(self):
+ """
+ Test Case 6: Compare PVP split ring performance between CPU copy, CBDMA copy and Sync copy
+ """
+ used_cbdma_num = 1
+ queue = 1
+ txd_rxd = 1024
+ eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
+ path_mode = 'mrg_rxbuf=1,in_order=1,server=1'
+ allow_pci = [self.dut.ports_info[0]['pci']]
+ self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+ for index in range(used_cbdma_num):
+ allow_pci.append(self.cbdma_dev_infos[index])
+ path_mode = 'mrg_rxbuf=1,in_order=1'
+ vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,client=1,dmas=[txq0@{self.device_str}],%s'"
+ compare_pvp_split_ring_performance = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
+ dev_path_mode_mapper = {
+ "sync_cbdma": ['dmathr=1024', 'dmathr=2000'],
+ "cpu": 'dmathr=0',
+ }
+ for key,dma_mode in dev_path_mode_mapper.items():
+ if key == "cpu":
+ vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=1'"
+ self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs, ports=[allow_pci[0]])
+ vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
+ self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4], dev=vdevs)
+ mode = "cpu_copy_64"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode, frame_sizes=[64], pkt_length_mode=None)
+ perf_cpu_copy_64 = self.throughput[mode][64][self.nb_desc]
+ self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)
+ self.virtio_user.send_expect("quit", "# ")
+ self.vhost_user.send_expect("quit", "# ")
+ else:
+ self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue, dma_mode[0]), ports=allow_pci)
+ vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d,server=1'" % queue
+ self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4],dev=vdevs)
+ mode = "sync_copy_64"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode,frame_sizes=[64],pkt_length_mode=None)
+ perf_sync_copy_64 = self.throughput[mode][64][self.nb_desc]
+ mode = "cbdma_copy_1518"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode=None)
+ perf_cbdma_copy_1518 = self.throughput[mode][1518][self.nb_desc]
+ self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)
+ self.vhost_user.send_expect("quit", "# ")
+ time.sleep(3)
+ self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue, dma_mode[1]), ports=allow_pci)
+ mode = "sync_copy_1518"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode=None)
+ perf_sync_copy_1518 = self.throughput[mode][1518][self.nb_desc]
+ self.check_port_stats_result(self.virtio_user)
+ self.virtio_user.send_expect("quit", "# ")
+ self.vhost_user.send_expect("quit", "# ")
+ self.result_table_print()
+ self.verify(abs(perf_cbdma_copy_1518 - perf_sync_copy_1518)/perf_sync_copy_1518 > 0.05 and abs(perf_sync_copy_64 - perf_cpu_copy_64)/perf_sync_copy_64 < 0.1, "sync_copy_64 vs. cpu_copy_64 delta > 10% or cbdma_copy_1518 vs sync_copy_1518 delta < 5%" )
+
@staticmethod
def vhost_or_virtio_set_one_queue(session):
session.send_expect('stop', 'testpmd> ', 120)
@@ -556,16 +594,16 @@ class TestVirTioVhostCbdma(TestCase):
# check RX/TX can work normally in each queue
self.check_packets_of_each_queue(queue_list=queue_list)
- def send_and_verify(self, mode, multiple_queue=True, queue_list=[]):
+ def send_and_verify(self, mode, multiple_queue=True, queue_list=[], frame_sizes=[],pkt_length_mode='imix'):
"""
Send packet with packet generator and verify
"""
- if self.pkt_length_mode == 'imix':
+ if self.pkt_length_mode == pkt_length_mode:
self.send_imix_and_verify(mode, multiple_queue, queue_list)
return
self.throughput[mode] = dict()
- for frame_size in self.frame_sizes:
+ for frame_size in frame_sizes:
self.throughput[mode][frame_size] = dict()
payload_size = frame_size - self.headers_size
tgenInput = []
--
2.32.0
* Re: [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about cbdma
From: Wang, Yinan @ 2021-07-07 2:51 UTC
To: Yang, LeweiX, dts; +Cc: Yang, LeweiX
Acked-by: Yinan Wang <yinan.wang@intel.com>
> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Yang Lewei
> Sent: 2021年7月7日 2:11
> To: dts@dpdk.org
> Cc: Yang, LeweiX <leweix.yang@intel.com>
> Subject: [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about
> cbdma
>
> From: YangLewei <leweix.yang@intel.com>
>
> 1. change cases using imix pkts for better coverage:
> - test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations
> - test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations
> 2. add one cbdma performance case:
> - test_perf_compare_pvp_split_ring_performance
>
> Signed-off-by: YangLewei <leweix.yang@intel.com>
* Re: [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about cbdma
From: Tu, Lijuan @ 2021-07-12 5:06 UTC
To: Yang, LeweiX, dts; +Cc: Yang, LeweiX
> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Yang Lewei
> Sent: 2021年7月7日 2:11
> To: dts@dpdk.org
> Cc: Yang, LeweiX <leweix.yang@intel.com>
> Subject: [dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about
> cbdma
>
> From: YangLewei <leweix.yang@intel.com>
>
> 1. change cases using imix pkts for better coverage:
> - test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations
> - test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations
> 2. add one cbdma performance case:
> - test_perf_compare_pvp_split_ring_performance
>
> Signed-off-by: YangLewei <leweix.yang@intel.com>
> ---
> [...]
> @@ -556,16 +594,16 @@ class TestVirTioVhostCbdma(TestCase):
> # check RX/TX can work normally in each queue
> self.check_packets_of_each_queue(queue_list=queue_list)
>
> - def send_and_verify(self, mode, multiple_queue=True, queue_list=[]):
> + def send_and_verify(self, mode, multiple_queue=True, queue_list=[], frame_sizes=[], pkt_length_mode='imix'):
> """
> - Send packet with packet generator and verify
> + Send packet with packet generator and verif
> """
> - if self.pkt_length_mode == 'imix':
> + if self.pkt_length_mode == pkt_length_mode:
> self.send_imix_and_verify(mode, multiple_queue, queue_list)
> return
According to "self.pkt_length_mode = (suite_cfg or {}).get('pkt_length_mode') or 'fixed'", self.pkt_length_mode is taken from the suite configuration.
What is the purpose of "self.pkt_length_mode == pkt_length_mode"? Is it checking that pkt_length_mode is 'imix'? If so, what happens when self.pkt_length_mode is set to "None"?
How pkt_length_mode is used in suite_cfg should also be considered.
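For context, the resent patch at the top of this thread drops the
suite-level self.pkt_length_mode and compares the per-call argument
against 'imix' directly. A minimal sketch of the two dispatch styles
(function names are illustrative; the logic follows the patches):

    # v1 (this patch): the imix path is taken whenever the suite-level
    # setting and the per-call argument match -- even when both are 'fixed'
    def dispatch_v1(suite_mode, call_mode='imix'):
        return 'imix' if suite_mode == call_mode else 'fixed'

    # resent patch: the caller alone decides; the suite config is not consulted
    def dispatch_v2(call_mode='imix'):
        return 'imix' if call_mode == 'imix' else 'fixed'

So dispatch_v1('fixed', 'fixed') takes the imix path while
dispatch_v1(None) falls back to fixed, which is the behavior the
questions above are probing.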
>
> self.throughput[mode] = dict()
> - for frame_size in self.frame_sizes:
> + for frame_size in frame_sizes:
> self.throughput[mode][frame_size] = dict()
> payload_size = frame_size - self.headers_size
> tgenInput = []
> --
> 2.32.0