From: "Wang, Yinan" <yinan.wang@intel.com>
To: "Xiao, QimaiX" <qimaix.xiao@intel.com>, "dts@dpdk.org" <dts@dpdk.org>
Subject: Re: [dts] [PATCH V2]pvp_vhost_user_reconnect: check perf data in each reconnect loop
Date: Fri, 12 Jun 2020 02:12:02 +0000 [thread overview]
Message-ID: <BN6PR11MB169805823467DC75702430188F810@BN6PR11MB1698.namprd11.prod.outlook.com> (raw)
In-Reply-To: <MWHPR1101MB22548E2BE3CA09D9404A8744FB800@MWHPR1101MB2254.namprd11.prod.outlook.com>
Acked-by: Wang, Yinan <yinan.wang@intel.com>
> -----Original Message-----
> From: dts <dts-bounces@dpdk.org> On Behalf Of Xiao, QimaiX
> Sent: 2020年6月11日 17:32
> To: dts@dpdk.org
> Subject: Re: [dts] [PATCH V2]pvp_vhost_user_reconnect: check perf data in each
> reconnect loop
>
> Tested-by: Xiao, QimaiX <qimaix.xiao@intel.com>
>
> Regards,
> Xiao Qimai
>
> > -----Original Message-----
> > From: Xiao, QimaiX <qimaix.xiao@intel.com>
> > Sent: Thursday, June 11, 2020 5:23 PM
> > To: dts@dpdk.org
> > Cc: Xiao, QimaiX <qimaix.xiao@intel.com>
> > Subject: [dts][PATCH V2]pvp_vhost_user_reconnect: check perf data in each
> > reconnect loop
> >
> > * 1.check perf data in each reconnect loop
> > * 2.increase perf descend tolerance from 5% to 15% because of network
> > fluctuations
> >
> > Signed-off-by: Xiao Qimai <qimaix.xiao@intel.com>
> > ---
> > tests/TestSuite_pvp_vhost_user_reconnect.py | 110 ++++++++---------------
> > -----
> > 1 file changed, 29 insertions(+), 81 deletions(-)
> >
> > diff --git a/tests/TestSuite_pvp_vhost_user_reconnect.py
> > b/tests/TestSuite_pvp_vhost_user_reconnect.py
> > index fa86d02..b609115 100644
> > --- a/tests/TestSuite_pvp_vhost_user_reconnect.py
> > +++ b/tests/TestSuite_pvp_vhost_user_reconnect.py
> > @@ -66,7 +66,7 @@ class TestPVPVhostUserReconnect(TestCase):
> > else:
> > self.socket_mem = '1024,1024'
> >
> > - self.reconnect_times = 2
> > + self.reconnect_times = 5
> > self.vm_num = 1
> > self.frame_sizes = [64, 1518]
> > self.virtio_ip = ["1.1.1.2", "1.1.1.3"] @@ -241,9 +241,9 @@ class
> > TestPVPVhostUserReconnect(TestCase):
> > self.vm_dut[0].send_expect(
> > 'iperf -s -p 12345 -i 1 > iperf_server.log &', '', 10)
> > self.vm_dut[1].send_expect(
> > - 'iperf -c %s -p 12345 -i 1 -t 5 > iperf_client.log &' %
> > + 'iperf -c %s -p 12345 -i 1 -t 10 > iperf_client.log &' %
> > self.virtio_ip[0], '', 60)
> > - time.sleep(20)
> > + time.sleep(15)
> >
> > def iperf_result_verify(self, cycle, tinfo):
> > """
> > @@ -306,14 +306,10 @@ class TestPVPVhostUserReconnect(TestCase):
> > if isinstance(self.before_data, dict):
> > for i in self.frame_sizes:
> > self.verify(
> > - (self.before_data[i] - self.vhost_reconnect_data[i]) <
> > self.before_data[i] * 0.05, 'verify reconnect vhost speed failed')
> > - self.verify(
> > - (self.before_data[i] - self.vm_reconnect_data[i]) <
> > self.before_data[i] * 0.05, 'verify reconnect vm speed failed')
> > + (self.before_data[i] - self.reconnect_data[i]) <
> > + self.before_data[i] * 0.15, 'verify reconnect speed failed')
> > else:
> > self.verify(
> > - (self.before_data - self.vhost_reconnect_data < self.before_data *
> > 0.05, 'verify reconnect vhost speed failed'))
> > - self.verify(
> > - (self.before_data - self.vm_reconnect_data < self.before_data *
> > 0.05, 'verify reconnect vm speed failed'))
> > + (self.before_data - self.reconnect_data) <
> > + self.before_data * 0.15, 'verify reconnect speed failed')
> >
> > def test_perf_split_ring_reconnet_one_vm(self):
> > """
> > @@ -332,32 +328,21 @@ class TestPVPVhostUserReconnect(TestCase):
> > vm_cycle = 1
> > # reconnet from vhost
> > self.logger.info('now reconnect from vhost')
> > - vhost_tmp = list()
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT testpmd", "# ")
> > self.launch_testpmd_as_vhost_user()
> > - vhost_tmp.append(self.send_and_verify(vm_cycle, "reconnet from
> > vhost"))
> > -
> > - self.vhost_reconnect_data = dict()
> > - for frame_size in self.frame_sizes:
> > - size_value = [i[frame_size] for i in vhost_tmp]
> > - self.vhost_reconnect_data[frame_size] =
> > sum(size_value)/len(size_value)
> > + self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet
> > from vhost")
> > + self.check_reconnect_perf()
> >
> > # reconnet from qemu
> > self.logger.info('now reconnect from vm')
> > - vm_tmp = list()
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ")
> > self.start_vms()
> > self.vm_testpmd_start()
> > - vm_tmp.append(self.send_and_verify(vm_cycle, "reconnet from
> > VM"))
> > -
> > - self.vm_reconnect_data = dict()
> > - for frame_size in self.frame_sizes:
> > - size_value = [i[frame_size] for i in vm_tmp]
> > - self.vm_reconnect_data[frame_size] =
> > sum(size_value)/len(size_value)
> > + self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet
> > from VM")
> > + self.check_reconnect_perf()
> > self.result_table_print()
> > - self.check_reconnect_perf()
> >
> > def test_perf_split_ring_reconnet_two_vms(self):
> > """
> > @@ -376,31 +361,21 @@ class TestPVPVhostUserReconnect(TestCase):
> > vm_cycle = 1
> > # reconnet from vhost
> > self.logger.info('now reconnect from vhost')
> > - vhost_tmp = list()
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT testpmd", "# ")
> > self.launch_testpmd_as_vhost_user()
> > - vhost_tmp.append(self.send_and_verify(vm_cycle, "reconnet from
> > vhost"))
> > - self.vhost_reconnect_data = dict()
> > - for frame_size in self.frame_sizes:
> > - size_value = [i[frame_size] for i in vhost_tmp]
> > - self.vhost_reconnect_data[frame_size] = sum(size_value) /
> > len(size_value)
> > + self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet
> > from vhost")
> > + self.check_reconnect_perf()
> >
> > # reconnet from qemu
> > self.logger.info('now reconnect from vm')
> > - vm_tmp = list()
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ")
> > self.start_vms()
> > self.vm_testpmd_start()
> > - vm_tmp.append(self.send_and_verify(vm_cycle, "reconnet from
> > VM"))
> > -
> > - self.vm_reconnect_data = dict()
> > - for frame_size in self.frame_sizes:
> > - size_value = [i[frame_size] for i in vm_tmp]
> > - self.vm_reconnect_data[frame_size] =
> > sum(size_value)/len(size_value)
> > + self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet
> > from VM")
> > + self.check_reconnect_perf()
> > self.result_table_print()
> > - self.check_reconnect_perf()
> >
> > def test_perf_split_ring_vm2vm_virtio_net_reconnet_two_vms(self):
> > """
> > @@ -419,13 +394,12 @@ class TestPVPVhostUserReconnect(TestCase):
> > vm_cycle = 1
> > # reconnet from vhost
> > self.logger.info('now reconnect from vhost')
> > - vhost_tmp = list()
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT testpmd", "# ")
> > self.launch_testpmd_as_vhost_user_with_no_pci()
> > self.start_iperf()
> > - vhost_tmp.append(self.iperf_result_verify(vm_cycle, 'reconnet from
> > vhost'))
> > - self.vhost_reconnect_data = sum(vhost_tmp)/len(vhost_tmp)
> > + self.reconnect_data = self.iperf_result_verify(vm_cycle, 'reconnet
> > from vhost')
> > + self.check_reconnect_perf()
> >
> > # reconnet from VM
> > self.logger.info('now reconnect from vm') @@ -437,10 +411,9 @@ class
> > TestPVPVhostUserReconnect(TestCase):
> > self.start_vms()
> > self.config_vm_intf()
> > self.start_iperf()
> > - vm_tmp.append(self.iperf_result_verify(vm_cycle, 'reconnet from
> > vm'))
> > - self.vm_reconnect_data = sum(vm_tmp)/len(vm_tmp)
> > + self.reconnect_data = self.iperf_result_verify(vm_cycle, 'reconnet
> > from vm')
> > + self.check_reconnect_perf()
> > self.result_table_print()
> > - self.check_reconnect_perf()
> >
> > def test_perf_packed_ring_reconnet_one_vm(self):
> > """
> > @@ -448,7 +421,6 @@ class TestPVPVhostUserReconnect(TestCase):
> > """
> > self.header_row = ["Mode", "FrameSize(B)", "Throughput(Mpps)",
> > "LineRate(%)", "Cycle", "Queue Number"]
> > - self.res = dict().fromkeys(["before_relaunch", "after_relaunch"], list())
> > self.result_table_create(self.header_row)
> > vm_cycle = 0
> > self.vm_num = 1
> > @@ -459,31 +431,22 @@ class TestPVPVhostUserReconnect(TestCase):
> >
> > vm_cycle = 1
> > # reconnet from vhost
> > - vhost_tmp = list()
> > self.logger.info('now reconnect from vhost')
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT testpmd", "# ")
> > self.launch_testpmd_as_vhost_user()
> > - vhost_tmp.append(self.send_and_verify(vm_cycle, "reconnet from
> > vhost"))
> > - self.vhost_reconnect_data = dict()
> > - for frame_size in self.frame_sizes:
> > - size_value = [i[frame_size] for i in vhost_tmp]
> > - self.vhost_reconnect_data[frame_size] = sum(size_value) /
> > len(size_value)
> > + self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet
> > from vhost")
> > + self.check_reconnect_perf()
> >
> > # reconnet from qemu
> > self.logger.info('now reconnect from vm')
> > - vm_tmp = list()
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ")
> > self.start_vms(packed=True)
> > self.vm_testpmd_start()
> > - vm_tmp.append(self.send_and_verify(vm_cycle, "reconnet from
> > VM"))
> > - self.vm_reconnect_data = dict()
> > - for frame_size in self.frame_sizes:
> > - size_value = [i[frame_size] for i in vm_tmp]
> > - self.vm_reconnect_data[frame_size] = sum(size_value) /
> > len(size_value)
> > + self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet
> > from VM")
> > + self.check_reconnect_perf()
> > self.result_table_print()
> > - self.check_reconnect_perf()
> >
> > def test_perf_packed_ring_reconnet_two_vms(self):
> > """
> > @@ -491,7 +454,6 @@ class TestPVPVhostUserReconnect(TestCase):
> > """
> > self.header_row = ["Mode", "FrameSize(B)", "Throughput(Mpps)",
> > "LineRate(%)", "Cycle", "Queue Number"]
> > - self.res = dict().fromkeys(["before_relaunch", "after_relaunch"], list())
> > self.result_table_create(self.header_row)
> > vm_cycle = 0
> > self.vm_num = 2
> > @@ -503,37 +465,26 @@ class TestPVPVhostUserReconnect(TestCase):
> > vm_cycle = 1
> > # reconnet from vhost
> > self.logger.info('now reconnect from vhost')
> > - vhost_tmp = list()
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT testpmd", "# ")
> > self.launch_testpmd_as_vhost_user()
> > - vhost_tmp.append(self.send_and_verify(vm_cycle, "reconnet from
> > vhost"))
> > -
> > - self.vhost_reconnect_data = dict()
> > - for frame_size in self.frame_sizes:
> > - size_value = [i[frame_size] for i in vhost_tmp]
> > - self.vhost_reconnect_data[frame_size] = sum(size_value) /
> > len(size_value)
> > + self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet
> > from vhost")
> > + self.check_reconnect_perf()
> > # reconnet from qemu
> > self.logger.info('now reconnect from vm')
> > - vm_tmp = list()
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ")
> > self.start_vms(packed=True)
> > self.vm_testpmd_start()
> > - vm_tmp.append(self.send_and_verify(vm_cycle, "reconnet from
> > VM"))
> > - self.vm_reconnect_data = dict()
> > - for frame_size in self.frame_sizes:
> > - size_value = [i[frame_size] for i in vm_tmp]
> > - self.vm_reconnect_data[frame_size] = sum(size_value) /
> > len(size_value)
> > + self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet
> > from VM")
> > + self.check_reconnect_perf()
> > self.result_table_print()
> > - self.check_reconnect_perf()
> >
> > def test_perf_packed_ring_virtio_net_reconnet_two_vms(self):
> > """
> > test the iperf traffice can resume after reconnet
> > """
> > self.header_row = ["Mode", "[M|G]bits/sec", "Cycle"]
> > - self.res = dict().fromkeys(["before_relaunch", "after_relaunch"], list())
> > self.result_table_create(self.header_row)
> > self.vm_num = 2
> > vm_cycle = 0
> > @@ -546,17 +497,15 @@ class TestPVPVhostUserReconnect(TestCase):
> > vm_cycle = 1
> > # reconnet from vhost
> > self.logger.info('now reconnect from vhost')
> > - vhost_tmp = list()
> > for i in range(self.reconnect_times):
> > self.dut.send_expect("killall -s INT testpmd", "# ")
> > self.launch_testpmd_as_vhost_user_with_no_pci()
> > self.start_iperf()
> > - vhost_tmp.append(self.iperf_result_verify(vm_cycle, 'reconnet from
> > vhost'))
> > - self.vhost_reconnect_data = sum(vhost_tmp)/len(vhost_tmp)
> > + self.reconnect_data = self.iperf_result_verify(vm_cycle, 'reconnet
> > from vhost')
> > + self.check_reconnect_perf()
> >
> > # reconnet from VM
> > self.logger.info('now reconnect from vm')
> > - vm_tmp = list()
> > for i in range(self.reconnect_times):
> > self.vm_dut[0].send_expect('rm iperf_server.log', '# ', 10)
> > self.vm_dut[1].send_expect('rm iperf_client.log', '# ', 10) @@ -564,10
> > +513,9 @@ class TestPVPVhostUserReconnect(TestCase):
> > self.start_vms(packed=True)
> > self.config_vm_intf()
> > self.start_iperf()
> > - vm_tmp.append(self.iperf_result_verify(vm_cycle, 'reconnet from
> > vm'))
> > - self.vm_reconnect_data = sum(vm_tmp)/len(vm_tmp)
> > + self.reconnect_data = self.iperf_result_verify(vm_cycle, 'reconnet
> > from vm')
> > + self.check_reconnect_perf()
> > self.result_table_print()
> > - self.check_reconnect_perf()
> >
> > def tear_down(self):
> > #
> > --
> > 1.8.3.1
next prev parent reply other threads:[~2020-06-12 2:12 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-06-11 9:22 Xiao Qimai
2020-06-11 9:31 ` Xiao, QimaiX
2020-06-12 2:12 ` Wang, Yinan [this message]
2020-06-19 3:39 ` Tu, Lijuan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=BN6PR11MB169805823467DC75702430188F810@BN6PR11MB1698.namprd11.prod.outlook.com \
--to=yinan.wang@intel.com \
--cc=dts@dpdk.org \
--cc=qimaix.xiao@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).