From: Ling Wei
To: dts@dpdk.org
Cc: Ling Wei
Date: Fri, 9 Apr 2021 14:13:18 +0800
Message-Id: <20210409061318.255521-1-weix.ling@intel.com>
Subject: [dts] [PATCH V1] tests/vhost_cbdma: add 2 packed ring cbdma test cases

1. Add 2 packed ring cbdma test cases: case 4 and case 5.
2. Add the expected values for the 2 packed ring cbdma cases to the config file.
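For reference, this is roughly how the suite consumes the new config entries; a minimal sketch only, with a trimmed dict standing in for the parsed conf/vhost_cbdma.cfg (the real lookup is self.get_suite_cfg()['expected_throughput'][self.test_target] inside each new case, keyed by test name, then by mode, frame size and descriptor number):

    # Sketch: shape of the new expected_throughput entries and how they are read.
    # The 0.00 values are placeholders that get refreshed because
    # update_expected = True in conf/vhost_cbdma.cfg.
    expected_throughput = {
        'test_perf_pvp_packed_all_path_with_cbdma_vhost_enqueue_operations': {
            'inorder_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}},
            # ... one sub-dict per mode, including the *_RestartVhost variants
        },
    }
    test_target = 'test_perf_pvp_packed_all_path_with_cbdma_vhost_enqueue_operations'
    # mirrors: self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
    expected = expected_throughput[test_target]['inorder_mergeable_path'][64][1024]
    print(expected)  # throughput expected for 64B frames with 1024 descriptors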
Signed-off-by: Ling Wei
---
 conf/vhost_cbdma.cfg           |   3 +-
 tests/TestSuite_vhost_cbdma.py | 133 ++++++++++++++++++++++++++++++++-
 2 files changed, 130 insertions(+), 6 deletions(-)

diff --git a/conf/vhost_cbdma.cfg b/conf/vhost_cbdma.cfg
index 2777d791..66981031 100644
--- a/conf/vhost_cbdma.cfg
+++ b/conf/vhost_cbdma.cfg
@@ -3,5 +3,4 @@ update_expected = True
 test_parameters = {64: [1024], 1518: [1024]}
 test_duration = 60
 accepted_tolerance = 2
-expected_throughput = {'test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations': {'dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'virtio_user_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vhost_userRelaunch_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_vhost_2_cbdma': {64: {1024: 0.00}, 1518: {1024: 0.00}}}, 'test_perf_pvp_spilt_all_path_with_cbdma_vhost_enqueue_operations': {'inorder_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}},}
-
+expected_throughput = {'test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations': {'dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'virtio_user_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vhost_userRelaunch_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_vhost_2_cbdma': {64: {1024: 0.00}, 1518: {1024: 0.00}}}, 'test_perf_pvp_spilt_all_path_with_cbdma_vhost_enqueue_operations': {'inorder_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}},'test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations': {'dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'virtio_user_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vhost_userRelaunch_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_vhost_2_cbdma': {64: {1024: 0.00}, 1518: {1024: 0.00}}},'test_perf_pvp_packed_all_path_with_cbdma_vhost_enqueue_operations': {'inorder_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}},}
diff --git a/tests/TestSuite_vhost_cbdma.py b/tests/TestSuite_vhost_cbdma.py
index 260c534e..1b4de8b6 100644
--- a/tests/TestSuite_vhost_cbdma.py
+++ b/tests/TestSuite_vhost_cbdma.py
@@ -110,6 +110,15 @@ class TestVirTioVhostCbdma(TestCase):
         self.dut.send_expect("rm -rf /tmp/s0", "#")
         self.mode_list = []
 
+    def bind_nic_driver(self, ports, driver=""):
+        for port in ports:
+            netdev = self.dut.ports_info[port]['port']
+            driver_now = netdev.get_nic_driver()
+            if driver == "":
+                driver = netdev.default_driver
+            if driver != driver_now:
+                netdev.bind_driver(driver=driver)
+
     def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num):
         """
         get all cbdma ports
@@ -207,8 +216,7 @@ class TestVirTioVhostCbdma(TestCase):
 
     def test_perf_pvp_spilt_all_path_with_cbdma_vhost_enqueue_operations(self):
         """
-        used one cbdma port bonding igb_uio
-        :return:
+        Test Case 1: PVP Split all path with DMA-accelerated vhost enqueue
         """
         self.test_target = self.running_case
         self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
@@ -254,8 +262,7 @@ class TestVirTioVhostCbdma(TestCase):
 
     def test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations(self):
         """
-        # used 2 cbdma ports bonding igb_uio
-        :return:
+        Test Case 2: Split ring dynamic queue number test for DMA-accelerated vhost Tx operations
         """
         self.test_target = self.running_case
         self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
@@ -363,6 +370,122 @@ class TestVirTioVhostCbdma(TestCase):
             check_value += len(re.findall('vid{},\S+threshold:{}'.format(vid_dict[dma], dma), str(return_param)))
         self.verify(check_value == used_cbdma_num, "Check failed: Actual value:{}".format(return_param))
 
+    def test_perf_pvp_packed_all_path_with_cbdma_vhost_enqueue_operations(self):
+        """
+        Test Case 4: PVP packed ring all path with DMA-accelerated vhost enqueue
+        """
+        self.test_target = self.running_case
+        self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
+        txd_rxd = 1024
+        dmathr = 1024
+        eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
+        queue = 1
+        used_cbdma_num = 1
+        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+        vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}],dmathr=%d'"
+        dev_path_mode_mapper = {
+            "inorder_mergeable_path": 'mrg_rxbuf=1,in_order=1,packed_vq=1',
+            "mergeable_path": 'mrg_rxbuf=1,in_order=0,packed_vq=1',
+            "inorder_non_mergeable_path": 'mrg_rxbuf=0,in_order=1,packed_vq=1',
+            "non_mergeable_path": 'mrg_rxbuf=0,in_order=0,packed_vq=1',
+            "vector_rx_path": 'mrg_rxbuf=0,in_order=0,packed_vq=1',
+        }
+        pvp_split_all_path_virtio_params = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
+        allow_pci = [self.dut.ports_info[0]['pci']]
+        for index in range(used_cbdma_num):
+            allow_pci.append(self.cbdma_dev_infos[index])
+        self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue, dmathr), ports=allow_pci)
+        for key, path_mode in dev_path_mode_mapper.items():
+            if key == "vector_rx_path":
+                pvp_split_all_path_virtio_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)
+            vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
+            self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs, self.cores[2:4], is_quit=False)
+            self.mode_list.append(key)
+            # step 3: restart vhost port, then check throughput again
+            key += "_RestartVhost"
+            self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
+            self.vhost_user.send_expect('stop', 'testpmd> ', 10)
+            self.vhost_user.send_expect('start', 'testpmd> ', 10)
+            self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)
+            self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
+            self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs,
+                                                   self.cores[2:4], launch_virtio=False)
+            self.mode_list.append(key)
+        self.vhost_user.send_expect("quit", "# ")
+        self.result_table_print()
+        self.handle_expected(mode_list=self.mode_list)
+        self.handle_results(mode_list=self.mode_list)
+
+    def test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations(self):
+        """
+        Test Case 5: Packed ring dynamic queue number test for DMA-accelerated vhost Tx operations
+        """
+        self.test_target = self.running_case
+        self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
+        used_cbdma_num = 4
+        queue = 2
+        txd_rxd = 1024
+        dmathr = 1024
+        nb_cores = 1
+        virtio_path = "/tmp/s0"
+        path_mode = 'mrg_rxbuf=1,in_order=1,packed_vq=1'
+        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+        vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr}"
+        eal_params = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d " % (nb_cores, txd_rxd, txd_rxd, queue, queue)
+        dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params}"
+        virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1"
+        vhost_dev = f"'net_vhost0,iface={virtio_path},queues={queue},client=1,%s'"
+        # launch vhost testpmd
+        allow_pci = [self.dut.ports_info[0]['pci']]
+        for index in range(used_cbdma_num):
+            if index < used_cbdma_num / 2:
+                allow_pci.append(self.cbdma_dev_infos[index])
+        self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
+        # start virtio testpmd with 2 queues, check performance and RX/TX
+        mode = "dynamic_queue2"
+        self.mode_list.append(mode)
+        self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
+        self.send_and_verify(mode, queue_list=range(queue))
+        # On virtio-user side, dynamically change rx/tx queue number from 2 queues to 1 queue
+        self.vhost_or_virtio_set_one_queue(self.virtio_user)
+        self.send_and_verify("virtio_user_" + mode + "_change_to_1", queue_list=[0])
+        self.mode_list.append("virtio_user_" + mode + "_change_to_1")
+        self.virtio_user.send_expect("stop", "testpmd> ")
+        self.virtio_user.send_expect("quit", "# ")
+        time.sleep(5)
+        self.dut.send_expect(f"rm -rf {virtio_path}", "#")
+        # relaunch virtio testpmd with 2 queues, check performance and RX/TX
+        self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
+        mode = "Relaunch_dynamic_queue2"
+        self.mode_list.append(mode)
+        self.send_and_verify(mode, queue_list=range(queue))
+        # On vhost side, dynamically change rx queue number from 2 queues to 1 queue
+        self.vhost_or_virtio_set_one_queue(self.vhost_user)
+        self.send_and_verify("vhost_user" + mode + "_change_to_1")
+        self.mode_list.append("vhost_user" + mode + "_change_to_1")
+        self.vhost_user.send_expect("quit", "# ")
+        time.sleep(2)
+        # Relaunch vhost with another two cbdma channels
+        mode = "Relaunch_vhost_2_cbdma"
+        self.mode_list.append(mode)
+        dmathr = 512
+        vhost_dmas = f"dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr}"
+        allow_pci = [self.dut.ports_info[0]['pci']]
+        for index in range(used_cbdma_num):
+            if index >= used_cbdma_num / 2:
+                allow_pci.append(self.cbdma_dev_infos[index])
+        self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)
+        self.virtio_user.send_expect("clear port stats all", "testpmd> ", 30)
+        self.send_and_verify(mode, queue_list=range(queue))
+        self.check_port_stats_result(self.virtio_user)
+        self.virtio_user.send_expect("quit", "# ")
+        self.vhost_user.send_expect("quit", "# ")
+        self.result_table_print()
+        # result_rows = [[], [64, 'dynamic_queue2', 7.4959375, 12.593175], [1518, 'dynamic_queue2', 1.91900225, 59.028509209999996]]
+        result_rows = self.result_table_getrows()
+        self.handle_expected(mode_list=self.mode_list)
+        self.handle_results(mode_list=self.mode_list)
+
     @staticmethod
     def vhost_or_virtio_set_one_queue(session):
         session.send_expect('stop', 'testpmd> ', 120)
@@ -533,6 +656,8 @@ class TestVirTioVhostCbdma(TestCase):
         """
         self.dut.send_expect("killall -I %s" % self.testpmd_name, '#', 20)
         self.bind_cbdma_device_to_kernel()
+        if self.running_case == 'test_check_threshold_value_with_cbdma':
+            self.bind_nic_driver(self.dut_ports, self.drivername)
 
     def tear_down_all(self):
         """
-- 
2.25.1
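
For reference, the vhost vdev argument that the new case 5 passes to testpmd expands roughly as below; a standalone sketch, with hypothetical CBDMA PCI addresses standing in for the self.used_cbdma entries the suite collects at run time:

    # Sketch: expansion of vhost_dev % vhost_dmas from test case 5.
    # '0000:00:04.0' / '0000:00:04.1' are placeholder CBDMA device addresses.
    used_cbdma = ['0000:00:04.0', '0000:00:04.1']
    queue = 2
    dmathr = 1024
    virtio_path = '/tmp/s0'
    vhost_dmas = f"dmas=[txq0@{used_cbdma[0]};txq1@{used_cbdma[1]}],dmathr={dmathr}"
    vhost_dev = f"'net_vhost0,iface={virtio_path},queues={queue},client=1,%s'"
    print(vhost_dev % vhost_dmas)
    # 'net_vhost0,iface=/tmp/s0,queues=2,client=1,dmas=[txq0@0000:00:04.0;txq1@0000:00:04.1],dmathr=1024'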