* [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration
@ 2019-10-10 0:19 lihong
2019-10-10 0:19 ` [dts] [PATCH V1 2/3][migration] framework: update code support migration lihong
` (3 more replies)
0 siblings, 4 replies; 7+ messages in thread
From: lihong @ 2019-10-10 0:19 UTC (permalink / raw)
To: dts; +Cc: yinan.wang, lihong
Signed-off-by: lihong <lihongx.ma@intel.com>
---
conf/vhost_user_live_migration.cfg | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/conf/vhost_user_live_migration.cfg b/conf/vhost_user_live_migration.cfg
index c687acd..f8b192d 100644
--- a/conf/vhost_user_live_migration.cfg
+++ b/conf/vhost_user_live_migration.cfg
@@ -90,6 +90,11 @@
# listending tcp port
# vm configuration for vhost user live migration case
+# host_share_dir configures the directory of the VM image on the host
+# backup_mount_path configures the mount directory on the backup host
+[mount_info]
+host_share_dir=/home/vm-image
+backup_mount_path=/mnt/nfs
[host]
cpu =
model=host,number=4,cpupin=5 6 7 8;
@@ -101,8 +106,6 @@ login =
user=root,password=tester;
daemon =
enable=yes;
-serial_port =
- enable=yes;
[backup]
cpu =
model=host,number=4,cpupin=5 6 7 8;
@@ -116,5 +119,3 @@ daemon =
enable=yes;
migration =
enable=yes,port=4444;
-serial_port =
- enable=yes;
--
2.7.4
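For reference, the new [mount_info] section is read by the suite in patch 3/3 through the UserConf helper; below is a minimal sketch of that lookup, assuming the suite name resolves to this config file and using the default paths shown above:

    from config import UserConf

    # load conf/vhost_user_live_migration.cfg and pick up the mount settings
    conf_info = UserConf('conf/vhost_user_live_migration.cfg')
    mount_info = conf_info.conf._sections['mount_info']
    share_path = mount_info['host_share_dir']       # /home/vm-image, exported by the host DUT
    mount_path = mount_info['backup_mount_path']    # /mnt/nfs, NFS mount point on the backup DUT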
* [dts] [PATCH V1 2/3][migration] framework: update code support migration
2019-10-10 0:19 [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration lihong
@ 2019-10-10 0:19 ` lihong
2019-10-10 8:04 ` Wang, Yinan
2019-10-10 0:19 ` [dts] [PATCH V1 3/3][migration] tests/vhost_user_live_migration: update code lihong
` (2 subsequent siblings)
3 siblings, 1 reply; 7+ messages in thread
From: lihong @ 2019-10-10 0:19 UTC (permalink / raw)
To: dts; +Cc: yinan.wang, lihong
Signed-off-by: lihong <lihongx.ma@intel.com>
---
framework/virt_base.py | 8 ++++++++
framework/virt_dut.py | 12 ++++++++----
2 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/framework/virt_base.py b/framework/virt_base.py
index 7c1e1de..6e3462c 100644
--- a/framework/virt_base.py
+++ b/framework/virt_base.py
@@ -65,6 +65,8 @@ class VirtBase(object):
self.host_dut = dut
self.vm_name = vm_name
self.suite = suite_name
+ # indicate whether the current vm is a migration vm
+ self.migration_vm = False
# create self used host session, need close it later
self.host_session = self.host_dut.new_session(self.vm_name)
@@ -330,6 +332,8 @@ class VirtBase(object):
"""
try:
if self.vm_status is ST_PAUSE:
+ # flag that the current vm is a migration vm
+ self.migration_vm = True
# connect backup vm dut and it just inherited from host
vm_dut = self.instantiate_vm_dut(set_target, cpu_topo, bind_dev=False, autodetect_topo=False)
except Exception as vm_except:
@@ -419,9 +423,13 @@ class VirtBase(object):
vm_dut.host_dut = self.host_dut
vm_dut.host_session = self.host_session
vm_dut.init_log()
+ vm_dut.migration_vm = self.migration_vm
read_cache = False
skip_setup = self.host_dut.skip_setup
+ # if the current vm is a migration vm, skip compiling dpdk
+ if self.migration_vm:
+ skip_setup = True
base_dir = self.host_dut.base_dir
vm_dut.set_speedup_options(read_cache, skip_setup)
diff --git a/framework/virt_dut.py b/framework/virt_dut.py
index b6f40d8..e4394b9 100644
--- a/framework/virt_dut.py
+++ b/framework/virt_dut.py
@@ -58,6 +58,7 @@ class VirtDut(DPDKdut):
self.hyper = hyper
self.cpu_topo = cpu_topo
self.dut_id = dut_id
+ self.migration_vm = False
self.vm_ip = crb['IP']
self.NAME = 'virtdut' + LOG_NAME_SEP + '%s' % self.vm_ip
@@ -186,10 +187,13 @@ class VirtDut(DPDKdut):
self.update_ports()
# restore dut ports to kernel
- if self.virttype != 'XEN':
- self.restore_interfaces()
- else:
- self.restore_interfaces_domu()
+ # if the current vm is a migration vm, skip restoring dut ports
+ # because some apps may still be running in the migrated guest
+ if not self.migration_vm:
+ if self.virttype != 'XEN':
+ self.restore_interfaces()
+ else:
+ self.restore_interfaces_domu()
# rescan ports after interface up
self.rescan_ports()
--
2.7.4
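To make the intent of the new flag concrete, here is a small standalone sketch (VmSketch is a hypothetical class, not framework code) of the propagation these hunks add: a VM connected through the ST_PAUSE/migrated path is marked as a migration VM, and its DUT object then skips the DPDK build and the port restore so that applications already running in the migrated guest are left untouched.

    class VmSketch(object):
        def __init__(self):
            self.migration_vm = False              # default, as in VirtBase and VirtDut

        def migrated_start(self):
            self.migration_vm = True               # ST_PAUSE branch in virt_base.py
            return self.instantiate_vm_dut()

        def instantiate_vm_dut(self):
            skip_setup = False
            if self.migration_vm:                  # skip compiling dpdk on the backup VM
                skip_setup = True
            restore_ports = not self.migration_vm  # virt_dut.py: keep running apps alive
            return skip_setup, restore_ports

    print(VmSketch().migrated_start())             # -> (True, False) for a migrated VM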
* [dts] [PATCH V1 3/3][migration] tests/vhost_user_live_migration: update code
2019-10-10 0:19 [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration lihong
2019-10-10 0:19 ` [dts] [PATCH V1 2/3][migration] framework: update code support migration lihong
@ 2019-10-10 0:19 ` lihong
2019-10-10 8:03 ` Wang, Yinan
2019-10-10 8:04 ` [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration Wang, Yinan
2019-10-12 6:09 ` Tu, Lijuan
3 siblings, 1 reply; 7+ messages in thread
From: lihong @ 2019-10-10 0:19 UTC (permalink / raw)
To: dts; +Cc: yinan.wang, lihong
Signed-off-by: lihong <lihongx.ma@intel.com>
---
tests/TestSuite_vhost_user_live_migration.py | 532 +++++++++++++++++----------
1 file changed, 332 insertions(+), 200 deletions(-)
diff --git a/tests/TestSuite_vhost_user_live_migration.py b/tests/TestSuite_vhost_user_live_migration.py
index 9bd3237..fa21b93 100644
--- a/tests/TestSuite_vhost_user_live_migration.py
+++ b/tests/TestSuite_vhost_user_live_migration.py
@@ -1,10 +1,40 @@
-# <COPYRIGHT_TAG>
+# BSD LICENSE
+#
+# Copyright(c) <2019> Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import time
-
-from qemu_kvm import QEMUKvm
+import utils
+from virt_common import VM
from test_case import TestCase
+from config import UserConf
from exception import VirtDutInitException
@@ -13,109 +43,174 @@ class TestVhostUserLiveMigration(TestCase):
def set_up_all(self):
# verify at least two duts
self.verify(len(self.duts) >= 2, "Insufficient duts for live migration!!!")
+ self.host_dut = self.duts[0]
+ self.backup_dut = self.duts[1]
# each dut required one ports
- self.dut_ports = self.dut.get_ports()
- # Verify that enough ports are available
- self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
- self.dut_port = self.dut_ports[0]
- dut_ip = self.dut.crb['My IP']
- self.host_tport = self.tester.get_local_port_bydut(self.dut_port, dut_ip)
- self.host_tintf = self.tester.get_interface(self.host_tport)
-
- self.backup_ports = self.duts[1].get_ports()
- # Verify that enough ports are available
- self.verify(len(self.backup_ports) >= 1, "Insufficient ports for testing")
- self.backup_port = self.backup_ports[0]
- # backup host ip will be used in migrate command
- self.backup_dutip = self.duts[1].crb['My IP']
- self.backup_tport = self.tester.get_local_port_bydut(self.backup_port, self.backup_dutip)
- self.backup_tintf = self.tester.get_interface(self.backup_tport)
-
- # Use testpmd as vhost-user application on host/backup server
- self.vhost = "./%s/app/testpmd" % self.target
- self.vm_testpmd = "./%s/app/testpmd -c 0x3 -n 4 -- -i" % self.target
- self.virio_mac = "52:54:00:00:00:01"
+ host_dut_ports = self.host_dut.get_ports()
+ backup_dut_ports = self.backup_dut.get_ports()
+ self.verify(len(host_dut_ports) >= 1 and len(backup_dut_ports) >= 1,
+ "Insufficient ports for testing")
+
+ # get mount info from cfg file
+ conf_info = UserConf('conf/%s.cfg' % self.suite_name)
+ conf_session = conf_info.conf._sections['mount_info']
+ self.mount_path = conf_session['backup_mount_path']
+ self.share_path = conf_session['host_share_dir']
+ # config the mount server and client
+ self.config_mount_server()
+ self.config_mount_client()
+
+ host_dut_port = host_dut_ports[0]
+ host_dut_ip = self.host_dut.crb['My IP']
+ backup_dut_port = backup_dut_ports[0]
+ self.backup_dut_ip = self.backup_dut.crb['My IP']
+
+ host_tport = self.tester.get_local_port_bydut(host_dut_port, host_dut_ip)
+ backup_tport = self.tester.get_local_port_bydut(backup_dut_port, self.backup_dut_ip)
+ self.host_tintf = self.tester.get_interface(host_tport)
+ self.backup_tintf = self.tester.get_interface(backup_tport)
+
+ self.host_mem_channels = self.host_dut.get_memory_channels()
+ self.backup_mem_channels = self.backup_dut.get_memory_channels()
+ self.host_pci_info = self.host_dut.ports_info[0]['pci']
+ self.backup_pci_info = self.backup_dut.ports_info[0]['pci']
- # flag for environment
- self.env_done = False
+ self.virio_mac = "52:54:00:00:00:01"
+ self.queue_number = 1
+ self.vm_dut_host = None
+ self.backup_vm = None
+ self.screen_name = 'migration'
+ self.base_dir = self.dut.base_dir.replace('~', '/root')
+ host_socket_num = len(set([int(core['socket']) for core in self.host_dut.cores]))
+ backup_socket_num = len(set([int(core['socket']) for core in self.backup_dut.cores]))
+ self.host_socket_mem = ','.join(['1024']*host_socket_num)
+ self.backup_socket_mem = ','.join(['1024']*backup_socket_num)
def set_up(self):
- self.setup_vm_env()
- pass
-
- def bind_nic_driver(self, crb, ports, driver=""):
- # modprobe vfio driver
- if driver == "vfio-pci":
- for port in ports:
- netdev = crb.ports_info[port]['port']
- driver = netdev.get_nic_driver()
- if driver != 'vfio-pci':
- netdev.bind_driver(driver='vfio-pci')
-
- elif driver == "igb_uio":
- # igb_uio should insmod as default, no need to check
- for port in ports:
- netdev = crb.ports_info[port]['port']
- driver = netdev.get_nic_driver()
- if driver != 'igb_uio':
- netdev.bind_driver(driver='igb_uio')
- else:
- for port in ports:
- netdev = crb.ports_info[port]['port']
- driver_now = netdev.get_nic_driver()
- if driver == "":
- driver = netdev.default_driver
- if driver != driver_now:
- netdev.bind_driver(driver=driver)
-
- def setup_vm_env(self, driver='default'):
+ self.host_dut.send_expect('rm ./vhost-net*', '# ', 30)
+ self.backup_dut.send_expect('rm ./vhost-net*', '# ', 30)
+ self.migration_done = False
+
+ def config_mount_server(self):
+ '''
+ get the mount server config from /etc/exports
+ if the share between host_dut and backup_dut is not configured there, add it
+ '''
+ config = '%s %s(rw,sync,no_root_squash)' % (
+ self.share_path, self.backup_dut.crb['IP'])
+ try:
+ fd = open('/etc/exports', 'r+')
+ except Exception as e:
+ self.logger.error('read file /etc/exports failed as %s' % str(e))
+ raise e
+ line = fd.readline()
+ while(line):
+ # already configured in /etc/exports
+ if not line.startswith('#') and config in line:
+ break
+ line = fd.readline()
+ # not configured in /etc/exports yet, write the config to it
+ if not line:
+ fd.write(config)
+ fd.close()
+
+ def config_mount_client(self):
+ '''
+ configure the mount client to access the mount server
+ '''
+ out = self.backup_dut.send_expect('ls -d %s' % self.mount_path, '# ')
+ if 'No such file or directory' in out:
+ self.backup_dut.send_expect('mkdir -p %s' % self.mount_path, '# ')
+ config = 'mount -t nfs -o nolock,vers=4 %s:%s %s' % (
+ self.host_dut.crb['IP'], self.share_path, self.mount_path)
+ self.host_dut.send_expect('service nfs-server restart', '# ')
+ self.backup_dut.send_expect('service nfs-server restart', '# ')
+ self.backup_dut.send_expect('umount %s' % self.mount_path, '# ')
+ self.backup_dut.send_expect(config, '# ')
+ time.sleep(2)
+ # verify the mount result
+ out_host = self.host_dut.send_expect('ls %s' % self.share_path, '#')
+ out_backup = self.backup_dut.send_expect('ls %s' % self.mount_path, '#')
+ self.verify(out_host == out_backup, 'the mount action failed, please confirm it')
+
+ def get_core_list(self):
+ core_number = self.queue_number + 1
+ core_config = '1S/%dC/1T' % core_number
+ core_list0 = self.duts[0].get_core_list(core_config)
+ core_list1 = self.duts[1].get_core_list(core_config)
+ self.verify(len(core_list0) >= core_number and len(core_list1) >= core_number,
+ 'There are not enough cores to start testpmd on duts')
+ self.host_core_mask_user = utils.create_mask(core_list0)
+ self.backup_core_mask_user = utils.create_mask(core_list1)
+
+ def launch_testpmd_as_vhost_on_both_dut(self, zero_copy=False):
+ """
+ start testpmd as vhost user on host_dut and backup_dut
+ """
+ self.get_core_list()
+ zero_copy_str = ''
+ if zero_copy is True:
+ zero_copy_str = ',dequeue-zero-copy=1'
+ vdev_info = 'eth_vhost0,iface=%s/vhost-net,queues=%d%s' % (
+ self.base_dir, self.queue_number, zero_copy_str)
+
+ params_info = '--nb-cores=%d --rxq=%d --txq=%d' % (
+ self.queue_number, self.queue_number, self.queue_number)
+
+ cmd_line = self.dut.target + '/app/testpmd -c %s -n %d -w %s ' + \
+ "--socket-mem %s --legacy-mem --file-prefix=vhost --vdev '%s' " + \
+ "-- -i %s"
+ host_cmd_line = cmd_line % (self.host_core_mask_user, self.host_mem_channels,
+ self.host_pci_info, self.host_socket_mem,
+ vdev_info, params_info)
+ backup_cmd_line = cmd_line % (self.backup_core_mask_user, self.backup_mem_channels,
+ self.backup_pci_info, self.backup_socket_mem,
+ vdev_info, params_info)
+
+ self.host_dut.send_expect(host_cmd_line, 'testpmd> ', 30)
+ self.backup_dut.send_expect(backup_cmd_line, 'testpmd> ', 30)
+
+ def start_testpmd_with_fwd_mode_on_both_dut(self, fwd_mode='io'):
+ self.host_dut.send_expect('set fwd %s' % fwd_mode, 'testpmd> ', 30)
+ self.host_dut.send_expect('start', 'testpmd> ', 30)
+ self.backup_dut.send_expect('set fwd %s' % fwd_mode, 'testpmd> ', 30)
+ self.backup_dut.send_expect('start', 'testpmd> ', 30)
+
+ def setup_vm_env_on_both_dut(self, driver='default'):
"""
Create testing environment on Host and Backup
"""
- if self.env_done:
- return
-
- # start vhost application on host and backup machines
- self.logger.info("Start vhost on host and backup host")
- for crb in self.duts[:2]:
- self.bind_nic_driver(crb, [crb.get_ports()[0]], driver="igb_uio")
- # start vhost app: testpmd, predict hugepage on both sockets
- base_dir = crb.base_dir.replace('~', '/root')
- crb.send_expect("rm -f %s/vhost-net" % base_dir, "# ")
- crb.send_expect("%s -c f -n 4 --socket-mem 512,512 --vdev 'eth_vhost0,iface=./vhost-net,queues=1' -- -i" % self.vhost, "testpmd> ",60)
- crb.send_expect("start", "testpmd> ")
-
try:
# set up host virtual machine
- self.host_vm = QEMUKvm(self.duts[0], 'host', 'vhost_user_live_migration')
+ self.host_vm = VM(self.duts[0], 'host', '%s' % self.suite_name)
vhost_params = {}
vhost_params['driver'] = 'vhost-user'
- # qemu command can't use ~
- base_dir = self.dut.base_dir.replace('~', '/root')
- vhost_params['opt_path'] = base_dir + '/vhost-net'
+ vhost_params['opt_path'] = self.base_dir + '/vhost-net'
vhost_params['opt_mac'] = self.virio_mac
+ opt_params = 'mrg_rxbuf=on'
+ if self.queue_number > 1:
+ vhost_params['opt_queue'] = self.queue_number
+ opt_params = 'mrg_rxbuf=on,mq=on,vectors=%d' % (2*self.queue_number + 2)
+ vhost_params['opt_settings'] = opt_params
self.host_vm.set_vm_device(**vhost_params)
self.logger.info("Start virtual machine on host")
- self.vm_host = self.host_vm.start()
+ self.vm_dut_host = self.host_vm.start()
- if self.vm_host is None:
+ if self.vm_dut_host is None:
raise Exception("Set up host VM ENV failed!")
- self.host_serial = self.host_vm.connect_serial_port(name='vhost_user_live_migration')
- if self.host_serial is None:
- raise Exception("Connect host serial port failed!")
-
self.logger.info("Start virtual machine on backup host")
# set up backup virtual machine
- self.backup_vm = QEMUKvm(self.duts[1], 'backup', 'vhost_user_live_migration')
+ self.backup_vm = VM(self.duts[1], 'backup', 'vhost_user_live_migration')
vhost_params = {}
vhost_params['driver'] = 'vhost-user'
- # qemu command can't use ~
- base_dir = self.dut.base_dir.replace('~', '/root')
- vhost_params['opt_path'] = base_dir + '/vhost-net'
+ vhost_params['opt_path'] = self.base_dir + '/vhost-net'
vhost_params['opt_mac'] = self.virio_mac
+ if self.queue_number > 1:
+ vhost_params['opt_queue'] = self.queue_number
+ vhost_params['opt_settings'] = opt_params
self.backup_vm.set_vm_device(**vhost_params)
# start qemu command
@@ -132,90 +227,118 @@ class TestVhostUserLiveMigration(TestCase):
self.destroy_vm_env()
raise Exception(ex)
- self.env_done = True
-
def destroy_vm_env(self):
- # if environment has been destroyed, just skip
- if self.env_done is False:
- return
-
- if getattr(self, 'host_serial', None):
- if self.host_vm is not None:
- self.host_vm.close_serial_port()
-
- if getattr(self, 'backup_serial', None):
- if self.backup_serial is not None and self.backup_vm is not None:
- self.backup_vm.close_serial_port()
-
-
- if getattr(self, 'vm_host', None):
- if self.vm_host is not None:
+ self.logger.info("Stop virtual machine on host")
+ try:
+ if self.vm_dut_host is not None:
+ if not self.migration_done:
+ self.vm_dut_host.send_expect('pkill screen', '# ')
self.host_vm.stop()
self.host_vm = None
+ except Exception as e:
+ self.logger.error('stop the qemu host failed as %s' % str(e))
self.logger.info("Stop virtual machine on backup host")
- if getattr(self, 'vm_backup', None):
- if self.vm_backup is not None:
- self.vm_backup.kill_all()
- # backup vm dut has been initialized, destroy backup vm
- self.backup_vm.stop()
- self.backup_vm = None
-
- if getattr(self, 'backup_vm', None):
- # only qemu start, no session created
+ try:
if self.backup_vm is not None:
+ if self.migration_done:
+ self.vm_dut_backup.kill_all()
+ self.vm_dut_backup.send_expect('pkill screen', '# ')
self.backup_vm.stop()
self.backup_vm = None
+ except Exception as e:
+ self.logger.error('stop the qemu backup failed as %s' % str(e))
# after vm stopped, stop vhost testpmd
- for crb in self.duts[:2]:
+ for crb in self.duts:
+ crb.send_expect('quit', '# ')
crb.kill_all()
- for crb in self.duts[:2]:
- self.bind_nic_driver(crb, [crb.get_ports()[0]], driver="igb_uio")
-
- self.env_done = False
-
- def send_pkts(self, intf, number=0):
+ def bind_nic_driver_of_vm(self, crb, driver=""):
+ # modprobe vfio driver
+ ports = crb.get_ports()
+ if driver == "vfio-pci":
+ crb.send_expect('modprobe vfio-pci', '# ')
+ for port in ports:
+ netdev = crb.ports_info[port]['port']
+ driver_now = netdev.get_nic_driver()
+ if driver_now != driver:
+ netdev.bind_driver(driver)
+
+ def send_pkts_in_bg(self):
"""
send packet from tester
"""
- sendp_fmt = "sendp([Ether(dst='%(DMAC)s')/IP()/UDP()/Raw('x'*18)], iface='%(INTF)s', count=%(COUNT)d)"
- sendp_cmd = sendp_fmt % {'DMAC': self.virio_mac, 'INTF': intf, 'COUNT': number}
- self.tester.scapy_append(sendp_cmd)
- self.tester.scapy_execute()
- # sleep 10 seconds for heavy load with backup host
- time.sleep(10)
-
- def verify_dpdk(self, tester_port, serial_session):
- num_pkts = 10
+ sendp_fmt = "sendp([Ether(dst='%s')/IP(src='%s', dst='%s')/UDP(sport=11,dport=12)/('x'*18)], iface='%s', loop=1, inter=0.5)"
+ sendp_cmd = sendp_fmt % (self.virio_mac, '1.1.1.1', '2.2.2.2', self.host_tintf)
+ self.send_pks_session = self.tester.create_session("scapy1")
+ self.send_pks_session.send_expect("scapy", ">>>")
+ self.send_pks_session.send_command(sendp_cmd)
+
+ if self.host_tintf != self.backup_tintf:
+ sendp_cmd = sendp_fmt % (self.virio_mac, '1.1.1.1', '2.2.2.2', self.backup_tintf)
+ self.send_pks_session2 = self.tester.create_session("scapy2")
+ self.send_pks_session2.send_expect("scapy", ">>>")
+ self.send_pks_session2.send_command(sendp_cmd)
+
+ def stop_send_pkts_on_tester(self):
+ self.tester.send_expect('pkill scapy', '# ')
+ if getattr(self, "scapy1", None):
+ self.tester.destroy_session(self.send_pks_session)
+ if getattr(self, "scapy2", None):
+ self.tester.destroy_session(self.send_pks_session2)
+
+ def start_testpmd_on_vm(self, vm_dut):
+ vm_dut.send_expect('export TERM=screen', '# ')
+ vm_dut.send_expect('screen -S %s' % self.screen_name, '# ', 120)
+
+ vm_testpmd = self.target + '/app/testpmd -c 0x3 -n 4 -- -i'
+ vm_dut.send_expect('cd %s' % self.base_dir, "# ")
+ vm_dut.send_expect(vm_testpmd, 'testpmd> ', 120)
+ vm_dut.send_expect('set fwd rxonly', 'testpmd> ', 30)
+ vm_dut.send_expect('set promisc all off', 'testpmd> ', 30)
+ vm_dut.send_expect('start', 'testpmd> ', 30)
+ vm_dut.send_command('^a')
+ vm_dut.send_command('^d')
+
+ def verify_dpdk(self, vm_dut):
+ vm_dut.send_expect('export TERM=screen', '# ')
+ vm_dut.send_command('screen -r %s' % self.screen_name)
stats_pat = re.compile("RX-packets: (\d+)")
- intf = self.tester.get_interface(tester_port)
- serial_session.send_expect("stop", "testpmd> ")
- serial_session.send_expect("set fwd rxonly", "testpmd> ")
- serial_session.send_expect("clear port stats all", "testpmd> ")
- serial_session.send_expect("start tx_first", "testpmd> ")
-
- # send packets from tester
- self.send_pkts(intf, number=num_pkts)
-
- out = serial_session.send_expect("show port stats 0", "testpmd> ")
+ vm_dut.send_expect("clear port stats all", "testpmd> ")
+ time.sleep(5)
+ out = vm_dut.send_expect("show port stats 0", "testpmd> ")
+ print out
m = stats_pat.search(out)
if m:
num_received = int(m.group(1))
else:
num_received = 0
- self.logger.info("Verified %s packets recevied" % num_received)
- self.verify(num_received >= num_pkts, "Not receive packets as expected!!!")
+ self.verify(num_received > 0, "Not receive packets as expected!!!")
+ vm_dut.send_command('^a')
+ vm_dut.send_command('^d')
- def verify_kernel(self, tester_port, vm_dut):
+ def verify_kernel(self, vm_dut):
"""
Function to verify packets received by virtIO
"""
- intf = self.tester.get_interface(tester_port)
- num_pkts = 10
+ vm_dut.send_expect('export TERM=screen', '# ')
+ vm_dut.send_command('screen -r %s' % self.screen_name)
+ # clean the output info before verify
+ vm_dut.get_session_output(timeout=1)
+ time.sleep(5)
+ out = vm_dut.get_session_output(timeout=1)
+ print out
+ num = out.count('UDP')
+ self.verify(num > 0, "Not receive packets as expected!!!")
+ vm_dut.send_command('^a')
+ vm_dut.send_command('^d')
+
+ def start_tcpdump_on_vm(self, vm_dut):
+ vm_dut.send_expect('export TERM=screen', '# ')
+ vm_dut.send_expect('screen -S %s' % self.screen_name, '# ', 120)
# get host interface
vm_intf = vm_dut.ports_info[0]['port'].get_interface_name()
@@ -232,99 +355,108 @@ class TestVhostUserLiveMigration(TestCase):
direct_param = ""
vm_dut.send_expect("tcpdump -i %s %s -v" % (vm_intf, direct_param), "listening on", 120)
- # wait for promisc on
- time.sleep(3)
+ time.sleep(2)
+ vm_dut.send_command('^a')
+ vm_dut.send_command('^d')
+
+ def send_and_verify(self, verify_fun, multi_queue=False):
+ '''
+ start to send packets
+ verify vm_host can receive packets before migration
+ verify vm_host can receive packets during migration
+ verify vm_backup can receive packets after migration
+ '''
# send packets from tester
- self.send_pkts(intf, number=num_pkts)
+ self.send_pkts_in_bg()
- # killall tcpdump and verify packet received
- out = vm_dut.get_session_output(timeout=1)
- vm_dut.send_expect("^C", "# ")
- num = out.count('UDP')
- self.logger.info("Verified %s packets recevied" % num_pkts)
- self.verify(num == num_pkts, "Not receive packets as expected!!!")
-
- def test_migrate_with_kernel(self):
- """
- Verify migrate virtIO device from host to backup host,
- Verify before/in/after migration, device with kernel driver can receive packets
- """
- # bind virtio-net back to virtio-pci
- self.bind_nic_driver(self.vm_host, [self.vm_host.get_ports()[0]], driver="")
# verify host virtio-net work fine
- self.verify_kernel(self.host_tport, self.vm_host)
+ verify_fun(self.vm_dut_host)
self.logger.info("Migrate host VM to backup host")
# start live migration
- ret = self.host_vm.start_migration(self.backup_dutip, self.backup_vm.migrate_port)
+ ret = self.host_vm.start_migration(self.backup_dut_ip, self.backup_vm.migrate_port)
self.verify(ret, "Failed to migration, please check VM and qemu version")
- # make sure still can receive packets in migration process
- self.verify_kernel(self.host_tport, self.vm_host)
+ if multi_queue is True:
+ vm_intf = self.vm_dut_host.ports_info[0]['port'].get_interface_name()
+ out = self.vm_dut_host.send_expect('ethtool -L %s combined 4' % vm_intf, '# ')
+ self.verify('Error' not in out and 'Failed' not in out, 'ethtool set combined failed during migration')
self.logger.info("Waiting migration process done")
# wait live migration done
self.host_vm.wait_migration_done()
-
- # check vhost testpmd log after migration
- out = self.duts[0].get_session_output(timeout=1)
- self.verify("closed" in out, "Vhost Connection NOT closed on host")
- out = self.duts[1].get_session_output(timeout=1)
- self.verify("established" in out, "Device not ready on backup host")
+ self.migration_done = True
self.logger.info("Migration process done, then go to backup VM")
# connected backup VM
- self.vm_backup = self.backup_vm.migrated_start()
+ self.vm_dut_backup = self.backup_vm.migrated_start(set_target=False)
# make sure still can receive packets
- self.verify_kernel(self.backup_tport, self.vm_backup)
+ verify_fun(self.vm_dut_backup)
- def test_migrate_with_dpdk(self):
- # bind virtio-net to igb_uio
- self.bind_nic_driver(self.vm_host, [self.vm_host.get_ports()[0]], driver="igb_uio")
+ def test_migrate_with_virtio_net(self):
+ """
+ Verify migrate virtIO device from host to backup host,
+ Verify before/in/after migration, device with kernel driver can receive packets
+ """
+ self.queue_number = 1
+ self.launch_testpmd_as_vhost_on_both_dut()
+ self.start_testpmd_with_fwd_mode_on_both_dut()
+ self.setup_vm_env_on_both_dut()
- # start testpmd on host vm
- base_dir = self.vm_host.base_dir.replace('~', '/root')
- self.host_serial.send_expect('cd %s' % base_dir, "# ")
- self.host_serial.send_expect(self.vm_testpmd, "testpmd> ", 120)
+ # bind virtio-net back to virtio-pci
+ self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
+ # start screen and tcpdump on vm
+ self.start_tcpdump_on_vm(self.vm_dut_host)
- # verify testpmd receive packets
- self.verify_dpdk(self.host_tport, self.host_serial)
+ self.send_and_verify(self.verify_kernel)
- self.logger.info("Migrate host VM to backup host")
- # start live migration
+ def test_migrete_with_vritio_net_with_multi_queue(self):
+ self.queue_number = 4
+ self.launch_testpmd_as_vhost_on_both_dut()
+ self.start_testpmd_with_fwd_mode_on_both_dut()
+ self.setup_vm_env_on_both_dut()
- ret = self.host_vm.start_migration(self.backup_dutip, self.backup_vm.migrate_port)
- self.verify(ret, "Failed to migration, please check VM and qemu version")
+ # bind virtio-net back to virtio-pci
+ self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
+ self.start_tcpdump_on_vm(self.vm_dut_host)
- # make sure still can receive packets in migration process
- self.verify_dpdk(self.host_tport, self.host_serial)
+ self.send_and_verify(self.verify_kernel, True)
- self.logger.info("Waiting migration process done")
- # wait live migration done
- self.host_vm.wait_migration_done()
+ def test_migrate_with_virtio_pmd(self):
+ self.queue_number = 1
+ self.launch_testpmd_as_vhost_on_both_dut()
+ self.start_testpmd_with_fwd_mode_on_both_dut()
+ self.setup_vm_env_on_both_dut()
- # check vhost testpmd log after migration
- out = self.duts[0].get_session_output(timeout=1)
- self.verify("closed" in out, "Vhost Connection NOT closed on host")
- out = self.duts[1].get_session_output(timeout=1)
- self.verify("established" in out, "Device not ready on backup host")
+ # bind virtio-net to igb_uio
+ self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
+ self.start_testpmd_on_vm(self.vm_dut_host)
- self.logger.info("Migration process done, then go to backup VM")
- time.sleep(5)
+ self.send_and_verify(self.verify_dpdk)
- # make sure still can receive packets
- self.backup_serial = self.backup_vm.connect_serial_port(name='vhost_user_live_migration', first=False)
- if self.backup_serial is None:
- raise Exception("Connect backup host serial port failed!")
+ def test_migrate_with_zero_copy_virtio_pmd(self):
+ self.queue_number = 1
+ zero_copy = True
+ # start testpmd and qemu on the duts
+ # only send the 'start' command to testpmd after qemu has started
+ # if 'start' is sent before qemu is up, qemu may fail to start
+ self.launch_testpmd_as_vhost_on_both_dut(zero_copy)
+ self.setup_vm_env_on_both_dut()
+ self.start_testpmd_with_fwd_mode_on_both_dut()
- self.verify_dpdk(self.backup_tport, self.backup_serial)
+ # bind virtio-net to igb_uio
+ self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
+ self.start_testpmd_on_vm(self.vm_dut_host)
- # quit testpmd
- self.backup_serial.send_expect("quit", "# ")
+ self.send_and_verify(self.verify_dpdk)
def tear_down(self):
self.destroy_vm_env()
+ # stop sending packets on the tester
+ self.stop_send_pkts_on_tester()
+ self.duts[0].send_expect('killall -s INT qemu-system-x86_64', '#')
+ self.duts[1].send_expect('killall -s INT qemu-system-x86_64', '#')
pass
def tear_down_all(self):
--
2.7.4
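Each rewritten test case follows the same sequence: start vhost-user testpmd on both DUTs, bring up the host VM and the backup VM (the latter waiting for the incoming migration on the port from the config), start background traffic from the tester, verify reception on the host VM, migrate, then verify again on the backup VM. As a rough standalone equivalent of the stream that send_pkts_in_bg() starts on the tester (assumes scapy is installed; 'ens1' is a placeholder for the tester interface wired to the host DUT):

    from scapy.all import Ether, IP, UDP, sendp

    virtio_mac = '52:54:00:00:00:01'
    pkt = Ether(dst=virtio_mac) / IP(src='1.1.1.1', dst='2.2.2.2') / UDP(sport=11, dport=12) / ('x' * 18)
    # loops every 0.5s until killed; the suite stops it with 'pkill scapy' in tear_down
    sendp(pkt, iface='ens1', loop=1, inter=0.5)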
* Re: [dts] [PATCH V1 3/3][migration] tests/vhost_user_live_migration: update code
2019-10-10 0:19 ` [dts] [PATCH V1 3/3][migration] tests/vhost_user_live_migration: update code lihong
@ 2019-10-10 8:03 ` Wang, Yinan
0 siblings, 0 replies; 7+ messages in thread
From: Wang, Yinan @ 2019-10-10 8:03 UTC (permalink / raw)
To: Ma, LihongX, dts
Acked-by: Wang, Yinan <yinan.wang@intel.com>
> -----Original Message-----
> From: Ma, LihongX
> Sent: 2019年10月10日 8:19
> To: dts@dpdk.org
> Cc: Wang, Yinan <yinan.wang@intel.com>; Ma, LihongX <lihongx.ma@intel.com>
> Subject: [dts][PATCH V1 3/3][migration] tests/vhost_user_live_migration: update
> code
>
> Signed-off-by: lihong <lihongx.ma@intel.com>
> ---
> tests/TestSuite_vhost_user_live_migration.py | 532
> +++++++++++++++++----------
> 1 file changed, 332 insertions(+), 200 deletions(-)
>
> diff --git a/tests/TestSuite_vhost_user_live_migration.py
> b/tests/TestSuite_vhost_user_live_migration.py
> index 9bd3237..fa21b93 100644
> --- a/tests/TestSuite_vhost_user_live_migration.py
> +++ b/tests/TestSuite_vhost_user_live_migration.py
> @@ -1,10 +1,40 @@
> -# <COPYRIGHT_TAG>
> +# BSD LICENSE
> +#
> +# Copyright(c) <2019> Intel Corporation.
> +# All rights reserved.
> +#
> +# Redistribution and use in source and binary forms, with or without #
> +modification, are permitted provided that the following conditions #
> +are met:
> +#
> +# * Redistributions of source code must retain the above copyright
> +# notice, this list of conditions and the following disclaimer.
> +# * Redistributions in binary form must reproduce the above copyright
> +# notice, this list of conditions and the following disclaimer in
> +# the documentation and/or other materials provided with the
> +# distribution.
> +# * Neither the name of Intel Corporation nor the names of its
> +# contributors may be used to endorse or promote products derived
> +# from this software without specific prior written permission.
> +#
> +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS #
> +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
> +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> #
> +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> #
> +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> #
> +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
> +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> #
> +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
> ANY #
> +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
> +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
> USE #
> +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
>
> import re
> import time
> -
> -from qemu_kvm import QEMUKvm
> +import utils
> +from virt_common import VM
> from test_case import TestCase
> +from config import UserConf
> from exception import VirtDutInitException
>
>
> @@ -13,109 +43,174 @@ class TestVhostUserLiveMigration(TestCase):
> def set_up_all(self):
> # verify at least two duts
> self.verify(len(self.duts) >= 2, "Insufficient duts for live migration!!!")
> + self.host_dut = self.duts[0]
> + self.backup_dut = self.duts[1]
>
> # each dut required one ports
> - self.dut_ports = self.dut.get_ports()
> - # Verify that enough ports are available
> - self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
> - self.dut_port = self.dut_ports[0]
> - dut_ip = self.dut.crb['My IP']
> - self.host_tport = self.tester.get_local_port_bydut(self.dut_port,
> dut_ip)
> - self.host_tintf = self.tester.get_interface(self.host_tport)
> -
> - self.backup_ports = self.duts[1].get_ports()
> - # Verify that enough ports are available
> - self.verify(len(self.backup_ports) >= 1, "Insufficient ports for testing")
> - self.backup_port = self.backup_ports[0]
> - # backup host ip will be used in migrate command
> - self.backup_dutip = self.duts[1].crb['My IP']
> - self.backup_tport = self.tester.get_local_port_bydut(self.backup_port,
> self.backup_dutip)
> - self.backup_tintf = self.tester.get_interface(self.backup_tport)
> -
> - # Use testpmd as vhost-user application on host/backup server
> - self.vhost = "./%s/app/testpmd" % self.target
> - self.vm_testpmd = "./%s/app/testpmd -c 0x3 -n 4 -- -i" % self.target
> - self.virio_mac = "52:54:00:00:00:01"
> + host_dut_ports = self.host_dut.get_ports()
> + backup_dut_ports = self.backup_dut.get_ports()
> + self.verify(len(host_dut_ports) >= 1 and len(backup_dut_ports) >= 1,
> + "Insufficient ports for testing")
> +
> + # get mount info from cfg file
> + conf_info = UserConf('conf/%s.cfg' % self.suite_name)
> + conf_session = conf_info.conf._sections['mount_info']
> + self.mount_path = conf_session['backup_mount_path']
> + self.share_path = conf_session['host_share_dir']
> + # config the mount server and client
> + self.config_mount_server()
> + self.config_mount_client()
> +
> + host_dut_port = host_dut_ports[0]
> + host_dut_ip = self.host_dut.crb['My IP']
> + backup_dut_port = backup_dut_ports[0]
> + self.backup_dut_ip = self.backup_dut.crb['My IP']
> +
> + host_tport = self.tester.get_local_port_bydut(host_dut_port,
> host_dut_ip)
> + backup_tport = self.tester.get_local_port_bydut(backup_dut_port,
> self.backup_dut_ip)
> + self.host_tintf = self.tester.get_interface(host_tport)
> + self.backup_tintf = self.tester.get_interface(backup_tport)
> +
> + self.host_mem_channels = self.host_dut.get_memory_channels()
> + self.backup_mem_channels =
> self.backup_dut.get_memory_channels()
> + self.host_pci_info = self.host_dut.ports_info[0]['pci']
> + self.backup_pci_info = self.backup_dut.ports_info[0]['pci']
>
> - # flag for environment
> - self.env_done = False
> + self.virio_mac = "52:54:00:00:00:01"
> + self.queue_number = 1
> + self.vm_dut_host = None
> + self.backup_vm = None
> + self.screen_name = 'migration'
> + self.base_dir = self.dut.base_dir.replace('~', '/root')
> + host_socket_num = len(set([int(core['socket']) for core in
> self.host_dut.cores]))
> + backup_socket_num = len(set([int(core['socket']) for core in
> self.backup_dut.cores]))
> + self.host_socket_mem = ','.join(['1024']*host_socket_num)
> + self.backup_socket_mem = ','.join(['1024']*backup_socket_num)
>
> def set_up(self):
> - self.setup_vm_env()
> - pass
> -
> - def bind_nic_driver(self, crb, ports, driver=""):
> - # modprobe vfio driver
> - if driver == "vfio-pci":
> - for port in ports:
> - netdev = crb.ports_info[port]['port']
> - driver = netdev.get_nic_driver()
> - if driver != 'vfio-pci':
> - netdev.bind_driver(driver='vfio-pci')
> -
> - elif driver == "igb_uio":
> - # igb_uio should insmod as default, no need to check
> - for port in ports:
> - netdev = crb.ports_info[port]['port']
> - driver = netdev.get_nic_driver()
> - if driver != 'igb_uio':
> - netdev.bind_driver(driver='igb_uio')
> - else:
> - for port in ports:
> - netdev = crb.ports_info[port]['port']
> - driver_now = netdev.get_nic_driver()
> - if driver == "":
> - driver = netdev.default_driver
> - if driver != driver_now:
> - netdev.bind_driver(driver=driver)
> -
> - def setup_vm_env(self, driver='default'):
> + self.host_dut.send_expect('rm ./vhost-net*', '# ', 30)
> + self.backup_dut.send_expect('rm ./vhost-net*', '# ', 30)
> + self.migration_done = False
> +
> + def config_mount_server(self):
> + '''
> + get the mount server config from file /etc/exports
> + if not config the mount info of host_dut and backup_dut, config it
> + '''
> + config = '%s %s(rw,sync,no_root_squash)' % (
> + self.share_path, self.backup_dut.crb['IP'])
> + try:
> + fd = open('/etc/exports', 'r+')
> + except Exception as e:
> + self.logger.error('read file /etc/exports failed as %s' % str(e))
> + raise e
> + line = fd.readline()
> + while(line):
> + # already config in etc file
> + if not line.startswith('#') and config in line:
> + break
> + line = fd.readline()
> + # not config in etc file, wirte the config to it
> + if not line:
> + fd.write(config)
> + fd.close()
> +
> + def config_mount_client(self):
> + '''
> + config the mount client to access the mount server
> + '''
> + out = self.backup_dut.send_expect('ls -d %s' % self.mount_path, '# ')
> + if 'No such file or directory' in out:
> + self.backup_dut.send_expect('mkdir -p %s' % self.mount_path, '#
> ')
> + config = 'mount -t nfs -o nolock,vers=4 %s:%s %s' % (
> + self.host_dut.crb['IP'], self.share_path,
> self.mount_path)
> + self.host_dut.send_expect('service nfs-server restart', '# ')
> + self.backup_dut.send_expect('service nfs-server restart', '# ')
> + self.backup_dut.send_expect('umount %s' % self.mount_path, '# ')
> + self.backup_dut.send_expect(config, '# ')
> + time.sleep(2)
> + # verify the mount result
> + out_host = self.host_dut.send_expect('ls %s' % self.share_path, '#')
> + out_backup = self.backup_dut.send_expect('ls %s' % self.mount_path,
> '#')
> + self.verify(out_host == out_backup, 'the mount action failed,
> + please confrim it')
> +
> + def get_core_list(self):
> + core_number = self.queue_number + 1
> + core_config = '1S/%dC/1T' % core_number
> + core_list0 = self.duts[0].get_core_list(core_config)
> + core_list1 = self.duts[1].get_core_list(core_config)
> + self.verify(len(core_list0) >= core_number and len(core_list1) >=
> core_number,
> + 'There have not enough cores to start testpmd on duts')
> + self.host_core_mask_user = utils.create_mask(core_list0)
> + self.backup_core_mask_user = utils.create_mask(core_list1)
> +
> + def launch_testpmd_as_vhost_on_both_dut(self, zero_copy=False):
> + """
> + start testpmd as vhost user on host_dut and backup_dut
> + """
> + self.get_core_list()
> + zero_copy_str = ''
> + if zero_copy is True:
> + zero_copy_str = ',dequeue-zero-copy=1'
> + vdev_info = 'eth_vhost0,iface=%s/vhost-net,queues=%d%s' % (
> + self.base_dir, self.queue_number, zero_copy_str)
> +
> + params_info = '--nb-cores=%d --rxq=%d --txq=%d' % (
> + self.queue_number, self.queue_number,
> + self.queue_number)
> +
> + cmd_line = self.dut.target + '/app/testpmd -c %s -n %d -w %s ' + \
> + "--socket-mem %s --legacy-mem --file-prefix=vhost --vdev
> '%s' " + \
> + "-- -i %s"
> + host_cmd_line = cmd_line % (self.host_core_mask_user,
> self.host_mem_channels,
> + self.host_pci_info, self.host_socket_mem,
> + vdev_info, params_info)
> + backup_cmd_line = cmd_line % (self.backup_core_mask_user,
> self.backup_mem_channels,
> + self.backup_pci_info, self.backup_socket_mem,
> + vdev_info, params_info)
> +
> + self.host_dut.send_expect(host_cmd_line, 'testpmd> ', 30)
> + self.backup_dut.send_expect(backup_cmd_line, 'testpmd> ', 30)
> +
> + def start_testpmd_with_fwd_mode_on_both_dut(self, fwd_mode='io'):
> + self.host_dut.send_expect('set fwd %s' % fwd_mode, 'testpmd> ', 30)
> + self.host_dut.send_expect('start', 'testpmd> ', 30)
> + self.backup_dut.send_expect('set fwd %s' % fwd_mode, 'testpmd> ',
> 30)
> + self.backup_dut.send_expect('start', 'testpmd> ', 30)
> +
> + def setup_vm_env_on_both_dut(self, driver='default'):
> """
> Create testing environment on Host and Backup
> """
> - if self.env_done:
> - return
> -
> - # start vhost application on host and backup machines
> - self.logger.info("Start vhost on host and backup host")
> - for crb in self.duts[:2]:
> - self.bind_nic_driver(crb, [crb.get_ports()[0]], driver="igb_uio")
> - # start vhost app: testpmd, predict hugepage on both sockets
> - base_dir = crb.base_dir.replace('~', '/root')
> - crb.send_expect("rm -f %s/vhost-net" % base_dir, "# ")
> - crb.send_expect("%s -c f -n 4 --socket-mem 512,512 --vdev
> 'eth_vhost0,iface=./vhost-net,queues=1' -- -i" % self.vhost, "testpmd> ",60)
> - crb.send_expect("start", "testpmd> ")
> -
> try:
> # set up host virtual machine
> - self.host_vm = QEMUKvm(self.duts[0], 'host',
> 'vhost_user_live_migration')
> + self.host_vm = VM(self.duts[0], 'host', '%s' %
> + self.suite_name)
> vhost_params = {}
> vhost_params['driver'] = 'vhost-user'
> - # qemu command can't use ~
> - base_dir = self.dut.base_dir.replace('~', '/root')
> - vhost_params['opt_path'] = base_dir + '/vhost-net'
> + vhost_params['opt_path'] = self.base_dir + '/vhost-net'
> vhost_params['opt_mac'] = self.virio_mac
> + opt_params = 'mrg_rxbuf=on'
> + if self.queue_number > 1:
> + vhost_params['opt_queue'] = self.queue_number
> + opt_params = 'mrg_rxbuf=on,mq=on,vectors=%d' %
> (2*self.queue_number + 2)
> + vhost_params['opt_settings'] = opt_params
> self.host_vm.set_vm_device(**vhost_params)
>
> self.logger.info("Start virtual machine on host")
> - self.vm_host = self.host_vm.start()
> + self.vm_dut_host = self.host_vm.start()
>
> - if self.vm_host is None:
> + if self.vm_dut_host is None:
> raise Exception("Set up host VM ENV failed!")
>
> - self.host_serial =
> self.host_vm.connect_serial_port(name='vhost_user_live_migration')
> - if self.host_serial is None:
> - raise Exception("Connect host serial port failed!")
> -
> self.logger.info("Start virtual machine on backup host")
> # set up backup virtual machine
> - self.backup_vm = QEMUKvm(self.duts[1], 'backup',
> 'vhost_user_live_migration')
> + self.backup_vm = VM(self.duts[1], 'backup',
> + 'vhost_user_live_migration')
> vhost_params = {}
> vhost_params['driver'] = 'vhost-user'
> - # qemu command can't use ~
> - base_dir = self.dut.base_dir.replace('~', '/root')
> - vhost_params['opt_path'] = base_dir + '/vhost-net'
> + vhost_params['opt_path'] = self.base_dir + '/vhost-net'
> vhost_params['opt_mac'] = self.virio_mac
> + if self.queue_number > 1:
> + vhost_params['opt_queue'] = self.queue_number
> + vhost_params['opt_settings'] = opt_params
> self.backup_vm.set_vm_device(**vhost_params)
>
> # start qemu command
> @@ -132,90 +227,118 @@ class TestVhostUserLiveMigration(TestCase):
> self.destroy_vm_env()
> raise Exception(ex)
>
> - self.env_done = True
> -
> def destroy_vm_env(self):
> - # if environment has been destroyed, just skip
> - if self.env_done is False:
> - return
> -
> - if getattr(self, 'host_serial', None):
> - if self.host_vm is not None:
> - self.host_vm.close_serial_port()
> -
> - if getattr(self, 'backup_serial', None):
> - if self.backup_serial is not None and self.backup_vm is not None:
> - self.backup_vm.close_serial_port()
> -
> -
> - if getattr(self, 'vm_host', None):
> - if self.vm_host is not None:
> + self.logger.info("Stop virtual machine on host")
> + try:
> + if self.vm_dut_host is not None:
> + if not self.migration_done:
> + self.vm_dut_host.send_expect('pkill screen', '# ')
> self.host_vm.stop()
> self.host_vm = None
> + except Exception as e:
> + self.logger.error('stop the qemu host failed as %s' %
> + str(e))
>
> self.logger.info("Stop virtual machine on backup host")
> - if getattr(self, 'vm_backup', None):
> - if self.vm_backup is not None:
> - self.vm_backup.kill_all()
> - # backup vm dut has been initialized, destroy backup vm
> - self.backup_vm.stop()
> - self.backup_vm = None
> -
> - if getattr(self, 'backup_vm', None):
> - # only qemu start, no session created
> + try:
> if self.backup_vm is not None:
> + if self.migration_done:
> + self.vm_dut_backup.kill_all()
> + self.vm_dut_backup.send_expect('pkill screen', '#
> + ')
> self.backup_vm.stop()
> self.backup_vm = None
> + except Exception as e:
> + self.logger.error('stop the qemu backup failed as %s' %
> + str(e))
>
> # after vm stopped, stop vhost testpmd
> - for crb in self.duts[:2]:
> + for crb in self.duts:
> + crb.send_expect('quit', '# ')
> crb.kill_all()
>
> - for crb in self.duts[:2]:
> - self.bind_nic_driver(crb, [crb.get_ports()[0]], driver="igb_uio")
> -
> - self.env_done = False
> -
> - def send_pkts(self, intf, number=0):
> + def bind_nic_driver_of_vm(self, crb, driver=""):
> + # modprobe vfio driver
> + ports = crb.get_ports()
> + if driver == "vfio-pci":
> + crb.send_expect('modprobe vfio-pci', '# ')
> + for port in ports:
> + netdev = crb.ports_info[port]['port']
> + driver_now = netdev.get_nic_driver()
> + if driver_now != driver:
> + netdev.bind_driver(driver)
> +
> + def send_pkts_in_bg(self):
> """
> send packet from tester
> """
> - sendp_fmt = "sendp([Ether(dst='%(DMAC)s')/IP()/UDP()/Raw('x'*18)],
> iface='%(INTF)s', count=%(COUNT)d)"
> - sendp_cmd = sendp_fmt % {'DMAC': self.virio_mac, 'INTF': intf,
> 'COUNT': number}
> - self.tester.scapy_append(sendp_cmd)
> - self.tester.scapy_execute()
> - # sleep 10 seconds for heavy load with backup host
> - time.sleep(10)
> -
> - def verify_dpdk(self, tester_port, serial_session):
> - num_pkts = 10
> + sendp_fmt = "sendp([Ether(dst='%s')/IP(src='%s',
> dst='%s')/UDP(sport=11,dport=12)/('x'*18)], iface='%s', loop=1, inter=0.5)"
> + sendp_cmd = sendp_fmt % (self.virio_mac, '1.1.1.1', '2.2.2.2',
> self.host_tintf)
> + self.send_pks_session = self.tester.create_session("scapy1")
> + self.send_pks_session.send_expect("scapy", ">>>")
> + self.send_pks_session.send_command(sendp_cmd)
> +
> + if self.host_tintf != self.backup_tintf:
> + sendp_cmd = sendp_fmt % {'DMAC': self.virio_mac, 'INTF':
> self.backup_tintf}
> + self.send_pks_session2 = self.tester.create_session("scapy2")
> + self.send_pks_session2.send_expect("scapy", ">>>")
> + self.send_pks_session2.send_command(sendp_cmd)
> +
> + def stop_send_pkts_on_tester(self):
> + self.tester.send_expect('pkill scapy', '# ')
> + if getattr(self, "scapy1", None):
> + self.tester.destroy_session(self.send_pks_session)
> + if getattr(self, "scapy2", None):
> + self.tester.destroy_session(self.send_pks_session2)
> +
> + def start_testpmd_on_vm(self, vm_dut):
> + vm_dut.send_expect('export TERM=screen', '# ')
> + vm_dut.send_expect('screen -S %s' % self.screen_name, '# ',
> + 120)
> +
> + vm_testpmd = self.target + '/app/testpmd -c 0x3 -n 4 -- -i'
> + vm_dut.send_expect('cd %s' % self.base_dir, "# ")
> + vm_dut.send_expect(vm_testpmd, 'testpmd> ', 120)
> + vm_dut.send_expect('set fwd rxonly', 'testpmd> ', 30)
> + vm_dut.send_expect('set promisc all off', 'testpmd> ', 30)
> + vm_dut.send_expect('start', 'testpmd> ', 30)
> + vm_dut.send_command('^a')
> + vm_dut.send_command('^d')
> +
> + def verify_dpdk(self, vm_dut):
> + vm_dut.send_expect('export TERM=screen', '# ')
> + vm_dut.send_command('screen -r %s' % self.screen_name)
>
> stats_pat = re.compile("RX-packets: (\d+)")
> - intf = self.tester.get_interface(tester_port)
> - serial_session.send_expect("stop", "testpmd> ")
> - serial_session.send_expect("set fwd rxonly", "testpmd> ")
> - serial_session.send_expect("clear port stats all", "testpmd> ")
> - serial_session.send_expect("start tx_first", "testpmd> ")
> -
> - # send packets from tester
> - self.send_pkts(intf, number=num_pkts)
> -
> - out = serial_session.send_expect("show port stats 0", "testpmd> ")
> + vm_dut.send_expect("clear port stats all", "testpmd> ")
> + time.sleep(5)
> + out = vm_dut.send_expect("show port stats 0", "testpmd> ")
> + print out
> m = stats_pat.search(out)
> if m:
> num_received = int(m.group(1))
> else:
> num_received = 0
>
> - self.logger.info("Verified %s packets recevied" % num_received)
> - self.verify(num_received >= num_pkts, "Not receive packets as
> expected!!!")
> + self.verify(num_received > 0, "Not receive packets as expected!!!")
> + vm_dut.send_command('^a')
> + vm_dut.send_command('^d')
>
> - def verify_kernel(self, tester_port, vm_dut):
> + def verify_kernel(self, vm_dut):
> """
> Function to verify packets received by virtIO
> """
> - intf = self.tester.get_interface(tester_port)
> - num_pkts = 10
> + vm_dut.send_expect('export TERM=screen', '# ')
> + vm_dut.send_command('screen -r %s' % self.screen_name)
> + # clean the output info before verify
> + vm_dut.get_session_output(timeout=1)
> + time.sleep(5)
> + out = vm_dut.get_session_output(timeout=1)
> + print out
> + num = out.count('UDP')
> + self.verify(num > 0, "Not receive packets as expected!!!")
> + vm_dut.send_command('^a')
> + vm_dut.send_command('^d')
> +
> + def start_tcpdump_on_vm(self, vm_dut):
> + vm_dut.send_expect('export TERM=screen', '# ')
> + vm_dut.send_expect('screen -S %s' % self.screen_name, '# ',
> + 120)
>
> # get host interface
> vm_intf = vm_dut.ports_info[0]['port'].get_interface_name()
> @@ -232,99 +355,108 @@ class TestVhostUserLiveMigration(TestCase):
> direct_param = ""
>
> vm_dut.send_expect("tcpdump -i %s %s -v" % (vm_intf,
> direct_param), "listening on", 120)
> - # wait for promisc on
> - time.sleep(3)
> + time.sleep(2)
> + vm_dut.send_command('^a')
> + vm_dut.send_command('^d')
> +
> + def send_and_verify(self, verify_fun, multi_queue=False):
> + '''
> + start to send packets
> + verify vm_host can recevied packets before migration
> + verify vm_host can recevied packets during migration
> + verify vm_backup can recevied packets after migration
> + '''
> # send packets from tester
> - self.send_pkts(intf, number=num_pkts)
> + self.send_pkts_in_bg()
>
> - # killall tcpdump and verify packet received
> - out = vm_dut.get_session_output(timeout=1)
> - vm_dut.send_expect("^C", "# ")
> - num = out.count('UDP')
> - self.logger.info("Verified %s packets recevied" % num_pkts)
> - self.verify(num == num_pkts, "Not receive packets as expected!!!")
> -
> - def test_migrate_with_kernel(self):
> - """
> - Verify migrate virtIO device from host to backup host,
> - Verify before/in/after migration, device with kernel driver can receive
> packets
> - """
> - # bind virtio-net back to virtio-pci
> - self.bind_nic_driver(self.vm_host, [self.vm_host.get_ports()[0]],
> driver="")
> # verify host virtio-net work fine
> - self.verify_kernel(self.host_tport, self.vm_host)
> + verify_fun(self.vm_dut_host)
>
> self.logger.info("Migrate host VM to backup host")
> # start live migration
> - ret = self.host_vm.start_migration(self.backup_dutip,
> self.backup_vm.migrate_port)
> + ret = self.host_vm.start_migration(self.backup_dut_ip,
> + self.backup_vm.migrate_port)
> self.verify(ret, "Failed to migration, please check VM and qemu
> version")
>
> - # make sure still can receive packets in migration process
> - self.verify_kernel(self.host_tport, self.vm_host)
> + if multi_queue is True:
> + vm_intf =
> self.vm_dut_host.ports_info[0]['port'].get_interface_name()
> + out = self.vm_dut_host.send_expect('ethtool -L %s combined 4' %
> vm_intf, '# ')
> + self.verify('Error' not in out and 'Failed' not in out,
> + 'ethtool set combined failed during migration')
>
> self.logger.info("Waiting migration process done")
> # wait live migration done
> self.host_vm.wait_migration_done()
> -
> - # check vhost testpmd log after migration
> - out = self.duts[0].get_session_output(timeout=1)
> - self.verify("closed" in out, "Vhost Connection NOT closed on host")
> - out = self.duts[1].get_session_output(timeout=1)
> - self.verify("established" in out, "Device not ready on backup host")
> + self.migration_done = True
>
> self.logger.info("Migration process done, then go to backup VM")
> # connected backup VM
> - self.vm_backup = self.backup_vm.migrated_start()
> + self.vm_dut_backup =
> + self.backup_vm.migrated_start(set_target=False)
>
> # make sure still can receive packets
> - self.verify_kernel(self.backup_tport, self.vm_backup)
> + verify_fun(self.vm_dut_backup)
>
> - def test_migrate_with_dpdk(self):
> - # bind virtio-net to igb_uio
> - self.bind_nic_driver(self.vm_host, [self.vm_host.get_ports()[0]],
> driver="igb_uio")
> + def test_migrate_with_virtio_net(self):
> + """
> + Verify migrate virtIO device from host to backup host,
> + Verify before/in/after migration, device with kernel driver can receive
> packets
> + """
> + self.queue_number = 1
> + self.launch_testpmd_as_vhost_on_both_dut()
> + self.start_testpmd_with_fwd_mode_on_both_dut()
> + self.setup_vm_env_on_both_dut()
>
> - # start testpmd on host vm
> - base_dir = self.vm_host.base_dir.replace('~', '/root')
> - self.host_serial.send_expect('cd %s' % base_dir, "# ")
> - self.host_serial.send_expect(self.vm_testpmd, "testpmd> ", 120)
> + # bind virtio-net back to virtio-pci
> + self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
> + # start screen and tcpdump on vm
> + self.start_tcpdump_on_vm(self.vm_dut_host)
>
> - # verify testpmd receive packets
> - self.verify_dpdk(self.host_tport, self.host_serial)
> + self.send_and_verify(self.verify_kernel)
>
> - self.logger.info("Migrate host VM to backup host")
> - # start live migration
> + def test_migrete_with_vritio_net_with_multi_queue(self):
> + self.queue_number = 4
> + self.launch_testpmd_as_vhost_on_both_dut()
> + self.start_testpmd_with_fwd_mode_on_both_dut()
> + self.setup_vm_env_on_both_dut()
>
> - ret = self.host_vm.start_migration(self.backup_dutip,
> self.backup_vm.migrate_port)
> - self.verify(ret, "Failed to migration, please check VM and qemu
> version")
> + # bind virtio-net back to virtio-pci
> + self.bind_nic_driver_of_vm(self.vm_dut_host, driver="")
> + self.start_tcpdump_on_vm(self.vm_dut_host)
>
> - # make sure still can receive packets in migration process
> - self.verify_dpdk(self.host_tport, self.host_serial)
> + self.send_and_verify(self.verify_kernel, True)
>
> - self.logger.info("Waiting migration process done")
> - # wait live migration done
> - self.host_vm.wait_migration_done()
> + def test_migrate_with_virtio_pmd(self):
> + self.queue_number = 1
> + self.launch_testpmd_as_vhost_on_both_dut()
> + self.start_testpmd_with_fwd_mode_on_both_dut()
> + self.setup_vm_env_on_both_dut()
>
> - # check vhost testpmd log after migration
> - out = self.duts[0].get_session_output(timeout=1)
> - self.verify("closed" in out, "Vhost Connection NOT closed on host")
> - out = self.duts[1].get_session_output(timeout=1)
> - self.verify("established" in out, "Device not ready on backup host")
> + # bind virtio-net to igb_uio
> + self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
> + self.start_testpmd_on_vm(self.vm_dut_host)
>
> - self.logger.info("Migration process done, then go to backup VM")
> - time.sleep(5)
> + self.send_and_verify(self.verify_dpdk)
>
> - # make sure still can receive packets
> - self.backup_serial =
> self.backup_vm.connect_serial_port(name='vhost_user_live_migration',
> first=False)
> - if self.backup_serial is None:
> - raise Exception("Connect backup host serial port failed!")
> + def test_migrate_with_zero_copy_virtio_pmd(self):
> + self.queue_number = 1
> + zero_copy = True
> + # start testpmd and qemu on dut
> + # after qemu start ok, then send 'start' command to testpmd
> + # if send 'start' command before start qemu, maybe qemu will start
> failed
> + self.launch_testpmd_as_vhost_on_both_dut(zero_copy)
> + self.setup_vm_env_on_both_dut()
> + self.start_testpmd_with_fwd_mode_on_both_dut()
>
> - self.verify_dpdk(self.backup_tport, self.backup_serial)
> + # bind virtio-net to igb_uio
> + self.bind_nic_driver_of_vm(self.vm_dut_host, driver="igb_uio")
> + self.start_testpmd_on_vm(self.vm_dut_host)
>
> - # quit testpmd
> - self.backup_serial.send_expect("quit", "# ")
> + self.send_and_verify(self.verify_dpdk)
>
> def tear_down(self):
> self.destroy_vm_env()
> + # stop sending packets on the tester
> + self.stop_send_pkts_on_tester()
> + self.duts[0].send_expect('killall -s INT qemu-system-x86_64', '#')
> + self.duts[1].send_expect('killall -s INT qemu-system-x86_64', '#')
> pass
>
> def tear_down_all(self):
> --
> 2.7.4
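All of the reworked cases above funnel through the same helpers: launch vhost testpmd on both DUTs, start forwarding, set up the VM environment, then hand a verifier (self.verify_kernel or self.verify_dpdk) to send_and_verify. The helper bodies are not part of this hunk, so the following is only a minimal, self-contained sketch of how such a verifier callback could be driven before, during, and after the live migration; the class and stub names below are invented for illustration and are not the suite's actual code.

# Sketch only: models the callback-driven flow suggested by the calls above.
class MigrationFlowSketch:
    def __init__(self, verifier):
        # verifier stands in for self.verify_kernel or self.verify_dpdk
        self.verifier = verifier
        self._migration_steps = 3  # pretend migration takes a few polls

    def start_migration(self):
        print("start live migration to backup host (stub)")

    def migration_done(self):
        self._migration_steps -= 1
        return self._migration_steps <= 0

    def send_and_verify(self):
        self.verifier("before migration")      # traffic OK on the host VM
        self.start_migration()
        while not self.migration_done():
            self.verifier("during migration")  # packets must keep flowing
        self.verifier("after migration")       # traffic OK on the backup VM


def verify_kernel(stage):
    # placeholder for "tcpdump on the virtio-net interface saw the packets"
    print("kernel path verified:", stage)


if __name__ == "__main__":
    MigrationFlowSketch(verify_kernel).send_and_verify()

The point of such a callback design is that the kernel path (virtio-net plus tcpdump) and the DPDK path (virtio PMD in testpmd) can share one migration loop and only swap the verification step.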
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [dts] [PATCH V1 2/3][migration] framework: update code support migration
2019-10-10 0:19 ` [dts] [PATCH V1 2/3][migration] framework: update code support migration lihong
@ 2019-10-10 8:04 ` Wang, Yinan
0 siblings, 0 replies; 7+ messages in thread
From: Wang, Yinan @ 2019-10-10 8:04 UTC (permalink / raw)
To: Ma, LihongX, dts
Acked-by: Wang, Yinan <yinan.wang@intel.com>
> -----Original Message-----
> From: Ma, LihongX
> Sent: October 10, 2019 8:19 AM
> To: dts@dpdk.org
> Cc: Wang, Yinan <yinan.wang@intel.com>; Ma, LihongX <lihongx.ma@intel.com>
> Subject: [dts][PATCH V1 2/3][migration] framework: update code support
> migration
>
> Signed-off-by: lihong <lihongx.ma@intel.com>
> ---
> framework/virt_base.py | 8 ++++++++
> framework/virt_dut.py | 12 ++++++++----
> 2 files changed, 16 insertions(+), 4 deletions(-)
>
> diff --git a/framework/virt_base.py b/framework/virt_base.py
> index 7c1e1de..6e3462c 100644
> --- a/framework/virt_base.py
> +++ b/framework/virt_base.py
> @@ -65,6 +65,8 @@ class VirtBase(object):
> self.host_dut = dut
> self.vm_name = vm_name
> self.suite = suite_name
> + # indicate whether the current vm is migration vm
> + self.migration_vm = False
>
> # create self used host session, need close it later
> self.host_session = self.host_dut.new_session(self.vm_name)
> @@ -330,6 +332,8 @@ class VirtBase(object):
> """
> try:
> if self.vm_status is ST_PAUSE:
> + # flag current vm is migration vm
> + self.migration_vm = True
> # connect backup vm dut and it just inherited from host
> vm_dut = self.instantiate_vm_dut(set_target, cpu_topo, bind_dev=False, autodetect_topo=False)
> except Exception as vm_except:
> @@ -419,9 +423,13 @@ class VirtBase(object):
> vm_dut.host_dut = self.host_dut
> vm_dut.host_session = self.host_session
> vm_dut.init_log()
> + vm_dut.migration_vm = self.migration_vm
>
> read_cache = False
> skip_setup = self.host_dut.skip_setup
> + # if current vm is migration vm, skip compile dpdk
> + if self.migration_vm:
> + skip_setup = True
> base_dir = self.host_dut.base_dir
> vm_dut.set_speedup_options(read_cache, skip_setup)
>
> diff --git a/framework/virt_dut.py b/framework/virt_dut.py
> index b6f40d8..e4394b9 100644
> --- a/framework/virt_dut.py
> +++ b/framework/virt_dut.py
> @@ -58,6 +58,7 @@ class VirtDut(DPDKdut):
> self.hyper = hyper
> self.cpu_topo = cpu_topo
> self.dut_id = dut_id
> + self.migration_vm = False
>
> self.vm_ip = crb['IP']
> self.NAME = 'virtdut' + LOG_NAME_SEP + '%s' % self.vm_ip
> @@ -186,10 +187,13 @@ class VirtDut(DPDKdut):
> self.update_ports()
>
> # restore dut ports to kernel
> - if self.virttype != 'XEN':
> - self.restore_interfaces()
> - else:
> - self.restore_interfaces_domu()
> + # if current vm is migration vm, skip restoring the dut ports
> + # because some applications may still be running on it
> + if not self.migration_vm:
> + if self.virttype != 'XEN':
> + self.restore_interfaces()
> + else:
> + self.restore_interfaces_domu()
> # rescan ports after interface up
> self.rescan_ports()
>
> --
> 2.7.4
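The effect of the new migration_vm flag, as quoted above, is that a backup VM started in the paused state inherits everything from the host VM, so DPDK is not recompiled and the ports are not re-bound on it. A small, self-contained sketch of that decision follows; VmDutSketch and prepare are invented names standing in for the framework code, only the condition mirrors the diff.

# Sketch only: a paused (backup) VM is treated as a migration VM and skips setup.
ST_RUNNING, ST_PAUSE = "running", "paused"


class VmDutSketch:
    def __init__(self, status):
        self.migration_vm = status == ST_PAUSE

    def prepare(self, skip_setup=False):
        if self.migration_vm:
            # the backup VM is a live copy of the host VM: the image already
            # carries a built DPDK tree and apps may still be running, so do
            # not rebuild dpdk and do not touch the port bindings
            skip_setup = True
            restore_ports = False
        else:
            restore_ports = True
        return skip_setup, restore_ports


print(VmDutSketch(ST_PAUSE).prepare())    # (True, False) -> backup/migration VM
print(VmDutSketch(ST_RUNNING).prepare())  # (False, True) -> normal VM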
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration
2019-10-10 0:19 [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration lihong
2019-10-10 0:19 ` [dts] [PATCH V1 2/3][migration] framework: update code support migration lihong
2019-10-10 0:19 ` [dts] [PATCH V1 3/3][migration] tests/vhost_user_live_migration: update code lihong
@ 2019-10-10 8:04 ` Wang, Yinan
2019-10-12 6:09 ` Tu, Lijuan
3 siblings, 0 replies; 7+ messages in thread
From: Wang, Yinan @ 2019-10-10 8:04 UTC (permalink / raw)
To: Ma, LihongX, dts
Acked-by: Wang, Yinan <yinan.wang@intel.com>
> -----Original Message-----
> From: Ma, LihongX
> Sent: October 10, 2019 8:19 AM
> To: dts@dpdk.org
> Cc: Wang, Yinan <yinan.wang@intel.com>; Ma, LihongX <lihongx.ma@intel.com>
> Subject: [dts][PATCH V1 1/3][migration] conf: add config file of
> vhost_user_live_migration
>
> Signed-off-by: lihong <lihongx.ma@intel.com>
> ---
> conf/vhost_user_live_migration.cfg | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/conf/vhost_user_live_migration.cfg b/conf/vhost_user_live_migration.cfg
> index c687acd..f8b192d 100644
> --- a/conf/vhost_user_live_migration.cfg
> +++ b/conf/vhost_user_live_migration.cfg
> @@ -90,6 +90,11 @@
> # listending tcp port
>
> # vm configuration for vhost user live migration case
> +# host_share_dir config the dir of vm img on host
> +# backup_mount_path config the mount dir on backup
> +[mount_info]
> +host_share_dir=/home/vm-image
> +backup_mount_path=/mnt/nfs
> [host]
> cpu =
> model=host,number=4,cpupin=5 6 7 8;
> @@ -101,8 +106,6 @@ login =
> user=root,password=tester;
> daemon =
> enable=yes;
> -serial_port =
> - enable=yes;
> [backup]
> cpu =
> model=host,number=4,cpupin=5 6 7 8;
> @@ -116,5 +119,3 @@ daemon =
> enable=yes;
> migration =
> enable=yes,port=4444;
> -serial_port =
> - enable=yes;
> --
> 2.7.4
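The new [mount_info] section only carries two paths: the directory holding the VM image on the host and the directory where the backup machine mounts it. As a hedged illustration (this is not the DTS config loader), a suite could read them with the standard-library configparser; the helper name read_mount_info is invented.

# Sketch only: parse the [mount_info] section shown in the diff above.
import configparser

SAMPLE = """
[mount_info]
host_share_dir=/home/vm-image
backup_mount_path=/mnt/nfs
"""


def read_mount_info(text):
    cfg = configparser.ConfigParser()
    cfg.read_string(text)
    share = cfg.get("mount_info", "host_share_dir")
    mount = cfg.get("mount_info", "backup_mount_path")
    # presumably `share` is exported (e.g. over NFS) by the host and mounted at
    # `mount` on the backup machine, so both qemu instances see the same image
    return share, mount


print(read_mount_info(SAMPLE))  # ('/home/vm-image', '/mnt/nfs')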
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration
2019-10-10 0:19 [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration lihong
` (2 preceding siblings ...)
2019-10-10 8:04 ` [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration Wang, Yinan
@ 2019-10-12 6:09 ` Tu, Lijuan
3 siblings, 0 replies; 7+ messages in thread
From: Tu, Lijuan @ 2019-10-12 6:09 UTC (permalink / raw)
To: Ma, LihongX, dts; +Cc: Wang, Yinan, Ma, LihongX
The patch series applied failed, please rework them.
> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of lihong
> Sent: Thursday, October 10, 2019 8:19 AM
> To: dts@dpdk.org
> Cc: Wang, Yinan <yinan.wang@intel.com>; Ma, LihongX
> <lihongx.ma@intel.com>
> Subject: [dts] [PATCH V1 1/3][migration] conf: add config file of
> vhost_user_live_migration
>
> Signed-off-by: lihong <lihongx.ma@intel.com>
> ---
> conf/vhost_user_live_migration.cfg | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/conf/vhost_user_live_migration.cfg b/conf/vhost_user_live_migration.cfg
> index c687acd..f8b192d 100644
> --- a/conf/vhost_user_live_migration.cfg
> +++ b/conf/vhost_user_live_migration.cfg
> @@ -90,6 +90,11 @@
> # listending tcp port
>
> # vm configuration for vhost user live migration case
> +# host_share_dir config the dir of vm img on host
> +# backup_mount_path config the mount dir on backup
> +[mount_info]
> +host_share_dir=/home/vm-image
> +backup_mount_path=/mnt/nfs
> [host]
> cpu =
> model=host,number=4,cpupin=5 6 7 8;
> @@ -101,8 +106,6 @@ login =
> user=root,password=tester;
> daemon =
> enable=yes;
> -serial_port =
> - enable=yes;
> [backup]
> cpu =
> model=host,number=4,cpupin=5 6 7 8;
> @@ -116,5 +119,3 @@ daemon =
> enable=yes;
> migration =
> enable=yes,port=4444;
> -serial_port =
> - enable=yes;
> --
> 2.7.4
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2019-10-12 6:09 UTC | newest]
Thread overview: 7+ messages
2019-10-10 0:19 [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration lihong
2019-10-10 0:19 ` [dts] [PATCH V1 2/3][migration] framework: update code support migration lihong
2019-10-10 8:04 ` Wang, Yinan
2019-10-10 0:19 ` [dts] [PATCH V1 3/3][migration] tests/vhost_user_live_migration: update code lihong
2019-10-10 8:03 ` Wang, Yinan
2019-10-10 8:04 ` [dts] [PATCH V1 1/3][migration] conf: add config file of vhost_user_live_migration Wang, Yinan
2019-10-12 6:09 ` Tu, Lijuan