test suite reviews and discussions
* [dts] [PATCH V1] framework/qemu_libvirt: add new features
@ 2019-08-19  4:57 yufengmx
  2019-08-19  4:57 ` yufengmx
  2019-08-19  7:26 ` Wang, Yinan
  0 siblings, 2 replies; 5+ messages in thread
From: yufengmx @ 2019-08-19  4:57 UTC (permalink / raw)
  To: dts, yinan.wang; +Cc: yufengmx

These new feature requests come from the Intel BKC team and Wang, Yinan.
*. support bridge settings in XML.
*. support vhost-user settings in XML; support adding driver attributes via the opt_queue and opt_settings options (see the usage sketch after this list).
*. add __add_vm_net_tap method to add a tap/bridge net device.
*. add __generate_net_config_script method to set up the default br0 bridge with a script.
*. add __write_config method to write configuration recursively into the XML config file.
*. use the DUT logger in place of print.
*. remove the graphics VNC config from the set_vm_default method and add an add_vm_vnc method to handle VNC settings, keeping behavior consistent with the qemu_kvm module.
*. add an add_vm_daemon method for consistency with the qemu_kvm module; libvirt runs virtual machines as daemons by default.
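
Below is a minimal usage sketch of the new options, assuming "vm" is a
LibvirtKvm instance; the MAC addresses, socket path, queue count, and
driver settings are illustrative only:

    # vhost-user interface; opt_queue/opt_settings become the
    # <driver> attributes and host/guest sub-elements
    vm.set_vm_device(driver='vhost-user',
                     opt_mac='52:54:00:00:00:01',
                     opt_path='/tmp/vhost-net0',
                     opt_queue='2',
                     opt_settings='host_mrg_rxbuf=on,guest_csum=off')

    # bridge interface bound to an existing host bridge
    vm.set_vm_device(driver='bridge',
                     opt_mac='52:54:00:00:00:02',
                     opt_br='br0')

    # tap device attached to the default bridge through /etc/qemu-ifup
    vm.add_vm_net(type='tap', opt_br='br0')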

yufengmx (1):
  framework/qemu_libvirt: add new features

 framework/qemu_libvirt.py | 327 ++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 273 insertions(+), 54 deletions(-)

-- 
1.9.3


* [dts] [PATCH V1] framework/qemu_libvirt: add new features
  2019-08-19  4:57 [dts] [PATCH V1] framework/qemu_libvirt: add new features yufengmx
@ 2019-08-19  4:57 ` yufengmx
  2019-08-19  7:25   ` Wang, Yinan
  2019-08-28  6:09   ` Tu, Lijuan
  2019-08-19  7:26 ` Wang, Yinan
  1 sibling, 2 replies; 5+ messages in thread
From: yufengmx @ 2019-08-19  4:57 UTC (permalink / raw)
  To: dts, yinan.wang; +Cc: yufengmx


*. support bridge settings in XML.
*. support vhost-user settings in XML; support adding driver attributes via the opt_queue and opt_settings options.
*. add __add_vm_net_tap method to add a tap/bridge net device.
*. add __generate_net_config_script method to set up the default br0 bridge with a script.
*. add __write_config method to write configuration recursively into the XML config file (see the sketch after this list).
*. use the DUT logger in place of print.
*. remove the graphics VNC config from the set_vm_default method and add an add_vm_vnc method to handle VNC settings, keeping behavior consistent with the qemu_kvm module.
*. add an add_vm_daemon method for consistency with the qemu_kvm module; libvirt runs virtual machines as daemons by default.
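
For reviewers, a minimal sketch of the nested-list format consumed by the
new __write_config helper (values illustrative); each entry is
[tag, attrs] or [tag, attrs, children]:

    config = [
        ['interface', {'type': 'vhostuser'}, [
            ['mac', {'address': '52:54:00:00:00:01'}],
            ['source', {'type': 'unix',
                        'path': '/tmp/vhost-net0',
                        'mode': 'client'}],
            ['model', {'type': 'virtio'}],
            ['driver', {'name': 'vhost', 'queues': '2'}, [
                ['host', {'mrg_rxbuf': 'on'}],
            ]],
        ]],
    ]
    # __write_config(devices, config) then emits the matching
    # <interface type='vhostuser'> subtree under <devices> in the
    # domain XML.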

Signed-off-by: yufengmx <yufengx.mo@intel.com>
---
 framework/qemu_libvirt.py | 327 ++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 273 insertions(+), 54 deletions(-)

diff --git a/framework/qemu_libvirt.py b/framework/qemu_libvirt.py
index 66fc54b..d2edd31 100644
--- a/framework/qemu_libvirt.py
+++ b/framework/qemu_libvirt.py
@@ -44,9 +44,25 @@ from config import VIRTCONF
 from exception import StartVMFailedException
 import xml.etree.ElementTree as ET
 from xml.etree.ElementTree import ElementTree
+from xml.dom import minidom
 
 
 class LibvirtKvm(VirtBase):
+    DEFAULT_BRIDGE = 'br0'
+    QEMU_IFUP = "#!/bin/sh\n\n" + \
+                "set -x\n\n" + \
+                "switch=%(switch)s\n\n" + \
+                "if [ -n \"$1\" ];then\n" + \
+                "   tunctl -t $1\n" + \
+                "   ip link set $1 up\n" + \
+                "   sleep 0.5s\n" + \
+                "   brctl addif $switch $1\n" + \
+                "   exit 0\n" + \
+                "else\n" + \
+                "   echo 'Error: no interface specified'\n" + \
+                "   exit 1\n" + \
+                "fi"
+    QEMU_IFUP_PATH = '/etc/qemu-ifup'
 
     def __init__(self, dut, name, suite):
         # initialize virtualization base module
@@ -55,6 +71,7 @@ class LibvirtKvm(VirtBase):
         # initialize qemu emulator, example: qemu-system-x86_64
         self.qemu_emulator = self.get_qemu_emulator()
 
+        self.logger = dut.logger
         # disk and pci device default index
         self.diskindex = 'a'
         self.controllerindex = 0
@@ -102,7 +119,8 @@ class LibvirtKvm(VirtBase):
         """
         arch = self.host_session.send_expect('uname -m', '# ')
         if arch == 'aarch64':
-            out = self.host_session.send_expect('service libvirtd status', "# ")
+            out = self.host_session.send_expect(
+                'service libvirtd status', "# ")
             if 'active (running)' not in out:
                 return False
             return True
@@ -214,13 +232,12 @@ class LibvirtKvm(VirtBase):
         os = self.domain.find('os')
         if 'loader' in options.keys():
             loader = ET.SubElement(
-            os, 'loader', {'readonly': 'yes', 'type': 'pflash'})
+                os, 'loader', {'readonly': 'yes', 'type': 'pflash'})
             loader.text = options['loader']
         if 'nvram' in options.keys():
             nvram = ET.SubElement(os, 'nvram')
             nvram.text = options['nvram']
 
-
     def set_vm_default_aarch64(self):
         os = ET.SubElement(self.domain, 'os')
         type = ET.SubElement(
@@ -231,7 +248,7 @@ class LibvirtKvm(VirtBase):
         ET.SubElement(features, 'acpi')
 
         ET.SubElement(self.domain, 'cpu',
-            {'mode': 'host-passthrough', 'check': 'none'})
+                      {'mode': 'host-passthrough', 'check': 'none'})
 
     def set_vm_default_x86_64(self):
         os = ET.SubElement(self.domain, 'os')
@@ -252,15 +269,11 @@ class LibvirtKvm(VirtBase):
         set_default_func = getattr(self, 'set_vm_default_' + arch)
         if callable(set_default_func):
             set_default_func()
-            
 
         # qemu-kvm for emulator
         device = ET.SubElement(self.domain, 'devices')
         ET.SubElement(device, 'emulator').text = self.qemu_emulator
 
-        # graphic device
-        ET.SubElement(device, 'graphics', {
-                      'type': 'vnc', 'port': '-1', 'autoport': 'yes'})
         # qemu guest agent
         self.add_vm_qga(None)
 
@@ -338,15 +351,48 @@ class LibvirtKvm(VirtBase):
 
         if 'opt_controller' in options:
             controller = ET.SubElement(devices, 'controller',
-                {'type': bus,
-                'index': hex(self.controllerindex)[2:],
-                'model': options['opt_controller']})
+                                       {'type': bus,
+                                        'index': hex(self.controllerindex)[2:],
+                                        'model': options['opt_controller']})
             self.controllerindex += 1
-            ET.SubElement(controller, 'address',
+            ET.SubElement(
+                controller, 'address',
                 {'type': 'pci', 'domain': '0x0000', 'bus': hex(self.pciindex),
-                'slot': '0x00', 'function': '0x00'})
+                 'slot': '0x00', 'function': '0x00'})
             self.pciindex += 1
 
+    def add_vm_daemon(self, **options):
+        pass
+
+    def add_vm_vnc(self, **options):
+        """
+        Add VM display option
+        """
+        disable = options.get('disable')
+        if disable and disable == 'True':
+            return
+        else:
+            displayNum = options.get('displayNum')
+            port = \
+                displayNum if displayNum else \
+                self.virt_pool.alloc_port(self.vm_name, port_type="display")
+        ip = self.host_dut.get_ip_address()
+        # set main block
+        graphics = {
+            'type': 'vnc',
+            'port': port,
+            'autoport': 'yes',
+            'listen': ip,
+            'keymap': 'en-us', }
+
+        devices = self.domain.find('devices')
+        graphics = ET.SubElement(devices, 'graphics', graphics)
+        # set sub block
+        listen = {
+            'type': 'address',
+            'address': ip, }
+        ET.SubElement(graphics, 'listen', listen)
+
     def add_vm_serial_port(self, **options):
         if 'enable' in options.keys():
             if options['enable'].lower() == 'yes':
@@ -356,18 +402,26 @@ class LibvirtKvm(VirtBase):
                 else:
                     serial_type = 'unix'
                 if serial_type == 'pty':
-                    serial = ET.SubElement(devices, 'serial', {'type': serial_type})
+                    serial = ET.SubElement(
+                        devices, 'serial', {'type': serial_type})
                     ET.SubElement(serial, 'target', {'port': '0'})
                 elif serial_type == 'unix':
-                    serial = ET.SubElement(devices, 'serial', {'type': serial_type})
+                    serial = ET.SubElement(
+                        devices, 'serial', {'type': serial_type})
                     self.serial_path = "/tmp/%s_serial.sock" % self.vm_name
-                    ET.SubElement(serial, 'source', {'mode': 'bind', 'path': self.serial_path})
+                    ET.SubElement(
+                        serial,
+                        'source',
+                        {'mode': 'bind', 'path': self.serial_path})
                     ET.SubElement(serial, 'target', {'port': '0'})
                 else:
-                    print utils.RED("Serial type %s is not supported!" % serial_type)
+                    msg = "Serial type %s is not supported!" % serial_type
+                    self.logger.error(msg)
                     return False
-                console = ET.SubElement(devices, 'console', {'type': serial_type})
-                ET.SubElement(console, 'target', {'type': 'serial', 'port': '0'})
+                console = ET.SubElement(
+                    devices, 'console', {'type': serial_type})
+                ET.SubElement(
+                    console, 'target', {'type': 'serial', 'port': '0'})
 
     def add_vm_login(self, **options):
         """
@@ -396,67 +450,201 @@ class LibvirtKvm(VirtBase):
             bus = m.group(1)
             slot = m.group(2)
             func = m.group(3)
-            dom  = '0'
+            dom = '0'
             return (bus, slot, func, dom)
         m = re.match(pci_regex_domain, pci_address)
         if m is not None:
             bus = m.group(2)
             slot = m.group(3)
             func = m.group(4)
-            dom  = m.group(1)
+            dom = m.group(1)
             return (bus, slot, func, dom)
         return None
 
     def set_vm_device(self, driver='pci-assign', **opts):
+        opts['driver'] = driver
         self.add_vm_device(**opts)
 
-    def add_vm_device(self, **options):
+    def __generate_net_config_script(self, switch=DEFAULT_BRIDGE):
         """
-        options:
-            pf_idx: device index of pass-through device
-            guestpci: assigned pci address in vm
+        Generate a script for qemu emulator to build a tap device
+        between host and guest.
         """
-        devices = self.domain.find('devices')
-        hostdevice = ET.SubElement(devices, 'hostdev', {
-                                   'mode': 'subsystem', 'type': 'pci',
-                                   'managed': 'yes'})
+        qemu_ifup = self.QEMU_IFUP % {'switch': switch}
+        file_name = os.path.basename(self.QEMU_IFUP_PATH)
+        tmp_file_path = '/tmp/%s' % file_name
+        self.host_dut.create_file(qemu_ifup, tmp_file_path)
+        self.host_session.send_expect(
+            'mv -f ~/%s %s' % (file_name, self.QEMU_IFUP_PATH), '# ')
+        self.host_session.send_expect(
+            'chmod +x %s' % self.QEMU_IFUP_PATH, '# ')
 
-        if 'opt_host' in options.keys():
-            pci_addr = options['opt_host']
-        else:
-            print utils.RED("Missing opt_host for device option!!!")
-            return False
+    def __parse_opt_setting(self, opt_settings):
+        if '=' not in opt_settings:
+            msg = 'invalid opt_settings format: %s' % opt_settings
+            raise Exception(msg)
+        setting = [item.split('=') for item in opt_settings.split(',')]
+        return dict(setting)
 
+    def __get_pci_addr_config(self, pci):
+        pci = self.__parse_pci(pci)
+        if pci is None:
+            msg = 'Invalid guestpci for host device pass-through !!!'
+            self.logger.error(msg)
+            return False
+        bus, slot, func, dom = pci
+        config = {
+            'type': 'pci', 'domain': '0x%s' % dom, 'bus': '0x%s' % bus,
+            'slot': '0x%s' % slot, 'function': '0x%s' % func}
+        return config
+
+    def __write_config(self, parent, configs):
+        for config in configs:
+            node_name = config[0]
+            opt = config[1]
+            node = ET.SubElement(parent, node_name, opt)
+            if len(config) == 3:
+                self.__write_config(node, config[2])
+
+    def __set_vm_bridge_interface(self, **options):
+        mac = options.get('opt_mac')
+        opt_br = options.get('opt_br')
+        if not mac or not opt_br:
+            msg = "Missing some bridge device option !!!"
+            self.logger.error(msg)
+            return False
+        _config = [
+            ['mac', {'address': mac}],
+            ['source', {'bridge': opt_br, }],
+            ['model', {'type': 'virtio', }]]
+        config = [['interface', {'type': 'bridge'}, _config]]
+        # set xml file
+        parent = self.domain.find('devices')
+        self.__write_config(parent, config)
+
+    def __add_vm_virtio_user_pci(self, **options):
+        mac = options.get('opt_mac')
+        mode = options.get('opt_server') or 'client'
+        # unix socket path of character device
+        sock_path = options.get('opt_path')
+        queue = options.get('opt_queue')
+        settings = options.get('opt_settings')
+        # pci address in virtual machine
+        pci = options.get('opt_host')
+        if not mac or not sock_path:
+            msg = "Missing some vhostuser device option !!!"
+            self.logger.error(msg)
+            return False
+        node_name = 'interface'
+        # basic options
+        _config = [
+            ['mac', {'address': mac}],
+            ['source', {'type': 'unix',
+                        'path': sock_path,
+                        'mode': mode, }],
+            ['model', {'type': 'virtio', }]]
+        # append pci address
+        if pci:
+            _config.append(['address', self.__get_pci_addr_config(pci)])
+        if queue or settings:
+            drv_config = {'name': 'vhost'}
+            sub_drv_config = []
+            if settings:
+                _sub_opt = self.__parse_opt_setting(settings)
+                drv_opt = {}
+                guest_opt = {}
+                host_opt = {}
+                for key, value in _sub_opt.iteritems():
+                    if key.startswith('host_'):
+                        host_opt[key[5:]] = value
+                        continue
+                    if key.startswith('guest_'):
+                        guest_opt[key[6:]] = value
+                        continue
+                    drv_opt[key] = value
+                drv_config.update(drv_opt)
+                if host_opt:
+                    sub_drv_config.append(['host', host_opt])
+                if guest_opt:
+                    sub_drv_config.append(['guest', guest_opt])
+            # The optional queues attribute controls the number of queues to be
+            # used for either Multiqueue virtio-net or vhost-user network
+            # interfaces. Each queue will potentially be handled by a different
+            # processor, resulting in much higher throughput. virtio-net since
+            # 1.0.6 (QEMU and KVM only); vhost-user since 1.2.17 (QEMU and
+            # KVM only).
+            if queue:
+                drv_config.update({'queues': queue, })
+            # set driver config
+            if sub_drv_config:
+                _config.append(['driver', drv_config, sub_drv_config])
+            else:
+                _config.append(['driver', drv_config])
+        config = [[node_name, {'type': 'vhostuser'}, _config]]
+        # set xml file
+        parent = self.domain.find('devices')
+        self.__write_config(parent, config)
 
+    def __add_vm_pci_assign(self, **options):
+        devices = self.domain.find('devices')
+        # add hostdev config block
+        config = {
+            'mode': 'subsystem',
+            'type': 'pci',
+            'managed': 'yes'}
+        hostdevice = ET.SubElement(devices, 'hostdev', config)
+        # add hostdev/source config block
+        pci_addr = options.get('opt_host')
+        if not pci_addr:
+            msg = "Missing opt_host for device option!!!"
+            self.logger.error(msg)
+            return False
         pci = self.__parse_pci(pci_addr)
         if pci is None:
             return False
         bus, slot, func, dom = pci
-
         source = ET.SubElement(hostdevice, 'source')
-        ET.SubElement(source, 'address', {
-                      'domain': '0x%s' % dom, 'bus': '0x%s' % bus,
-                      'slot': '0x%s' % slot,
-                      'function': '0x%s' % func})
-        if 'guestpci' in options.keys():
-            guest_pci_addr = options['guestpci']
-        else:
+        config = {
+            'domain': '0x%s' % dom,
+            'bus': '0x%s' % bus,
+            'slot': '0x%s' % slot,
+            'function': '0x%s' % func}
+        ET.SubElement(source, 'address', config)
+        # add hostdev/source/address config block
+        guest_pci_addr = options.get('guestpci')
+        if not guest_pci_addr:
             guest_pci_addr = '0000:%s:00.0' % hex(self.pciindex)[2:]
             self.pciindex += 1
-        pci = self.__parse_pci(guest_pci_addr)
-        if pci is None:
-            print utils.RED('Invalid guestpci for host device pass-through!!!')
-            return False
-        bus, slot, func, dom = pci
-        ET.SubElement(hostdevice, 'address', {
-              'type': 'pci', 'domain': '0x%s' % dom, 'bus': '0x%s' % bus,
-              'slot': '0x%s' % slot, 'function': '0x%s' % func})
+        config = self.__get_pci_addr_config(guest_pci_addr)
+        ET.SubElement(hostdevice, 'address', config)
         # save host and guest pci address mapping
         pci_map = {}
         pci_map['hostpci'] = pci_addr
         pci_map['guestpci'] = guest_pci_addr
         self.pci_maps.append(pci_map)
 
+    def add_vm_device(self, **options):
+        """
+        options:
+            driver: vhost-user | bridge | pci-assign (default)
+            guestpci: assigned pci address in vm (pci-assign only)
+        """
+        driver_table = {
+            'vhost-user':
+                self.__add_vm_virtio_user_pci,
+            'bridge':
+                self.__set_vm_bridge_interface,
+            'pci-assign':
+                self.__add_vm_pci_assign,
+        }
+        driver = options.get('driver')
+        if not driver or driver not in driver_table.keys():
+            driver = 'pci-assign'
+            msg = 'use {0} configuration as default driver'.format(driver)
+            self.logger.warning(msg)
+        func = driver_table.get(driver)
+        func(**options)
+
     def add_vm_net(self, **options):
         """
         Options:
@@ -465,6 +653,8 @@ class LibvirtKvm(VirtBase):
         if 'type' in options.keys():
             if options['type'] == 'nic':
                 self.__add_vm_net_nic(**options)
+            elif options['type'] == 'tap':
+                self.__add_vm_net_tap(**options)
 
     def __add_vm_net_nic(self, **options):
         """
@@ -506,6 +696,30 @@ class LibvirtKvm(VirtBase):
             ET.SubElement(qemu, 'qemu:arg', {'value': 'user,hostfwd='
                                              'tcp:%s:%d-:22' % (dut_ip, port)})
 
+    def __add_vm_net_tap(self, **options):
+        """
+        type: tap
+        opt_br: br0
+            note: name of the bridge to attach the tap device to;
+                  defaults to br0 when not specified.
+        opt_script: QEMU_IFUP_PATH
+            note: defaults to self.QEMU_IFUP_PATH when not specified.
+        """
+        _config = [['target', {'dev': 'tap0'}]]
+        # add bridge info
+        opt_br = options.get('opt_br')
+        bridge = opt_br if opt_br else self.DEFAULT_BRIDGE
+        _config.append(['source', {'bridge': bridge}])
+        self.__generate_net_config_script(str(bridge))
+        # add network configure script path
+        opt_script = options.get('opt_script')
+        script_path = opt_script if opt_script else self.QEMU_IFUP_PATH
+        _config.append(['script', {'path': script_path}])
+        config = [['interface', {'type': 'bridge'}, _config]]
+        # set xml file
+        parent = self.domain.find('devices')
+        self.__write_config(parent, config)
+
     def add_vm_virtio_serial_channel(self, **options):
         """
         Options:
@@ -516,7 +730,8 @@ class LibvirtKvm(VirtBase):
         channel = ET.SubElement(devices, 'channel', {'type': 'unix'})
         for opt in ['path', 'name']:
             if opt not in options.keys():
-                print "invalid virtio serial channel setting"
+                msg = "invalid virtio serial channel setting"
+                self.logger.error(msg)
                 return
 
         ET.SubElement(
@@ -570,11 +785,15 @@ class LibvirtKvm(VirtBase):
 
     def _start_vm(self):
         xml_file = "/tmp/%s.xml" % self.vm_name
-        try:
+        if os.path.exists(xml_file):
             os.remove(xml_file)
-        except:
-            pass
         self.root.write(xml_file)
+        with open(xml_file, 'rb') as fp:
+            content = fp.read()
+        doc = minidom.parseString(content)
+        vm_content = doc.toprettyxml(indent='    ')
+        with open(xml_file, 'wb') as fp:
+            fp.write(vm_content)
         self.host_session.copy_file_to(xml_file)
         time.sleep(2)
 
-- 
1.9.3


* Re: [dts] [PATCH V1] framework/qemu_libvirt: add new features
  2019-08-19  4:57 ` yufengmx
@ 2019-08-19  7:25   ` Wang, Yinan
  2019-08-28  6:09   ` Tu, Lijuan
  1 sibling, 0 replies; 5+ messages in thread
From: Wang, Yinan @ 2019-08-19  7:25 UTC (permalink / raw)
  To: Mo, YufengX, dts

Acked-by: Wang, Yinan <yinan.wang@intel.com>


* Re: [dts] [PATCH V1] framework/qemu_libvirt: add new features
  2019-08-19  4:57 [dts] [PATCH V1] framework/qemu_libvirt: add new features yufengmx
  2019-08-19  4:57 ` yufengmx
@ 2019-08-19  7:26 ` Wang, Yinan
  1 sibling, 0 replies; 5+ messages in thread
From: Wang, Yinan @ 2019-08-19  7:26 UTC (permalink / raw)
  To: Mo, YufengX, dts

Acked-by: Wang, Yinan <yinan.wang@intel.com>


* Re: [dts] [PATCH V1] framework/qemu_libvirt: add new features
  2019-08-19  4:57 ` yufengmx
  2019-08-19  7:25   ` Wang, Yinan
@ 2019-08-28  6:09   ` Tu, Lijuan
  1 sibling, 0 replies; 5+ messages in thread
From: Tu, Lijuan @ 2019-08-28  6:09 UTC (permalink / raw)
  To: Mo, YufengX, dts, Wang, Yinan; +Cc: Mo, YufengX

Sorry, the patch failed to apply; maybe the code base has changed.

error: patch failed: framework/qemu_libvirt.py:44
error: framework/qemu_libvirt.py: patch does not apply
Patch failed at 0001 framework/qemu_libvirt: add new features

> -----Original Message-----
> From: dts [mailto:dts-bounces@dpdk.org] On Behalf Of yufengmx
> Sent: Monday, August 19, 2019 12:57 PM
> To: dts@dpdk.org; Wang, Yinan <yinan.wang@intel.com>
> Cc: Mo, YufengX <yufengx.mo@intel.com>
> Subject: [dts] [PATCH V1]framework/qemu_libvirt: add new features
> 
> 
> *. support bridge setting in xml.
> *. support vhost-user setting in xml. support to add driver attribute by set
> option opt_queue and opt_setting.
> *. add __add_vm_net_tap method to add tap&bridge net device.
> *. add __generate_net_config_script method to set default br0 using script.
> *. add __write_config method to make recursion write config in xml config
> file.
> *. use dut logger to take the place of print.
> *. remove graphics vnc config in set_vm_default method, add add_vm_vnc
> method to do graphics vnc setting. This is aimed to keep the same as
> qemu_kvm method.
> *. add add_vm_daemon method to keep the same as qemu_kvm method.
> libvirt run virtual machine with daemon status by default.
> 
> Signed-off-by: yufengmx <yufengx.mo@intel.com>
> ---
>  framework/qemu_libvirt.py | 327
> ++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 273 insertions(+), 54 deletions(-)
> 
> diff --git a/framework/qemu_libvirt.py b/framework/qemu_libvirt.py index
> 66fc54b..d2edd31 100644
> --- a/framework/qemu_libvirt.py
> +++ b/framework/qemu_libvirt.py
> @@ -44,9 +44,25 @@ from config import VIRTCONF  from exception import
> StartVMFailedException  import xml.etree.ElementTree as ET  from
> xml.etree.ElementTree import ElementTree
> +from xml.dom import minidom
> 
> 
>  class LibvirtKvm(VirtBase):
> +    DEFAULT_BRIDGE = 'br0'
> +    QEMU_IFUP = "#!/bin/sh\n\n" + \
> +                "set -x\n\n" + \
> +                "switch=%(switch)s\n\n" + \
> +                "if [ -n '$1' ];then\n" + \
> +                "   tunctl -t $1\n" + \
> +                "   ip link set $1 up\n" + \
> +                "   sleep 0.5s\n" + \
> +                "   brctl addif $switch $1\n" + \
> +                "   exit 0\n" + \
> +                "else\n" + \
> +                "   echo 'Error: no interface specified'\n" + \
> +                "   exit 1\n" + \
> +                "fi"
> +    QEMU_IFUP_PATH = '/etc/qemu-ifup'
> 
>      def __init__(self, dut, name, suite):
>          # initialize virtualization base module @@ -55,6 +71,7 @@ class
> LibvirtKvm(VirtBase):
>          # initialize qemu emulator, example: qemu-system-x86_64
>          self.qemu_emulator = self.get_qemu_emulator()
> 
> +        self.logger = dut.logger
>          # disk and pci device default index
>          self.diskindex = 'a'
>          self.controllerindex = 0
> @@ -102,7 +119,8 @@ class LibvirtKvm(VirtBase):
>          """
>          arch = self.host_session.send_expect('uname -m', '# ')
>          if arch == 'aarch64':
> -            out = self.host_session.send_expect('service libvirtd status', "# ")
> +            out = self.host_session.send_expect(
> +                'service libvirtd status', "# ")
>              if 'active (running)' not in out:
>                  return False
>              return True
> @@ -214,13 +232,12 @@ class LibvirtKvm(VirtBase):
>          os = self.domain.find('os')
>          if 'loader' in options.keys():
>              loader = ET.SubElement(
> -            os, 'loader', {'readonly': 'yes', 'type': 'pflash'})
> +                os, 'loader', {'readonly': 'yes', 'type': 'pflash'})
>              loader.text = options['loader']
>          if 'nvram' in options.keys():
>              nvram = ET.SubElement(os, 'nvram')
>              nvram.text = options['nvram']
> 
> -
>      def set_vm_default_aarch64(self):
>          os = ET.SubElement(self.domain, 'os')
>          type = ET.SubElement(
> @@ -231,7 +248,7 @@ class LibvirtKvm(VirtBase):
>          ET.SubElement(features, 'acpi')
> 
>          ET.SubElement(self.domain, 'cpu',
> -            {'mode': 'host-passthrough', 'check': 'none'})
> +                      {'mode': 'host-passthrough', 'check': 'none'})
> 
>      def set_vm_default_x86_64(self):
>          os = ET.SubElement(self.domain, 'os') @@ -252,15 +269,11 @@ class
> LibvirtKvm(VirtBase):
>          set_default_func = getattr(self, 'set_vm_default_' + arch)
>          if callable(set_default_func):
>              set_default_func()
> -
> 
>          # qemu-kvm for emulator
>          device = ET.SubElement(self.domain, 'devices')
>          ET.SubElement(device, 'emulator').text = self.qemu_emulator
> 
> -        # graphic device
> -        ET.SubElement(device, 'graphics', {
> -                      'type': 'vnc', 'port': '-1', 'autoport': 'yes'})
>          # qemu guest agent
>          self.add_vm_qga(None)
> 
> @@ -338,15 +351,48 @@ class LibvirtKvm(VirtBase):
> 
>          if 'opt_controller' in options:
>              controller = ET.SubElement(devices, 'controller',
> -                {'type': bus,
> -                'index': hex(self.controllerindex)[2:],
> -                'model': options['opt_controller']})
> +                                       {'type': bus,
> +                                        'index': hex(self.controllerindex)[2:],
> +                                        'model':
> + options['opt_controller']})
>              self.controllerindex += 1
> -            ET.SubElement(controller, 'address',
> +            ET.SubElement(
> +                controller, 'address',
>                  {'type': 'pci', 'domain': '0x0000', 'bus': hex(self.pciindex),
> -                'slot': '0x00', 'function': '0x00'})
> +                 'slot': '0x00', 'function': '0x00'})
>              self.pciindex += 1
> 
> +    def add_vm_daemon(self, **options):
> +        pass
> +
> +    def add_vm_vnc(self, **options):
> +        """
> +        Add VM display option
> +        """
> +        disable = options.get('disable')
> +        if disable and disable == 'True':
> +            return
> +        else:
> +            displayNum = options.get('displayNum')
> +            port = \
> +                displayNum if displayNum else \
> +                self.virt_pool.alloc_port(self.vm_name, port_type="display")
> +        ip = self.host_dut.get_ip_address()
> +        # set main block
> +        graphics = {
> +            'type': 'vnc',
> +            'port': port,
> +            'autoport': 'yes',
> +            'listen': ip,
> +            'keymap': 'en-us', }
> +
> +        devices = self.domain.find('devices')
> +        graphics = ET.SubElement(devices, 'graphics', graphics)
> +        # set sub block
> +        listen = {
> +            'type': 'address',
> +            'address': ip, }
> +        ET.SubElement(graphics, 'listen', listen)
> +
>      def add_vm_serial_port(self, **options):
>          if 'enable' in options.keys():
>              if options['enable'].lower() == 'yes':
> @@ -356,18 +402,26 @@ class LibvirtKvm(VirtBase):
>                  else:
>                      serial_type = 'unix'
>                  if serial_type == 'pty':
> -                    serial = ET.SubElement(devices, 'serial', {'type': serial_type})
> +                    serial = ET.SubElement(
> +                        devices, 'serial', {'type': serial_type})
>                      ET.SubElement(serial, 'target', {'port': '0'})
>                  elif serial_type == 'unix':
> -                    serial = ET.SubElement(devices, 'serial', {'type': serial_type})
> +                    serial = ET.SubElement(
> +                        devices, 'serial', {'type': serial_type})
>                      self.serial_path = "/tmp/%s_serial.sock" % self.vm_name
> -                    ET.SubElement(serial, 'source', {'mode': 'bind', 'path':
> self.serial_path})
> +                    ET.SubElement(
> +                        serial,
> +                        'source',
> +                        {'mode': 'bind', 'path': self.serial_path})
>                      ET.SubElement(serial, 'target', {'port': '0'})
>                  else:
> -                    print utils.RED("Serial type %s is not supported!" % serial_type)
> +                    msg = "Serial type %s is not supported!" % serial_type
> +                    self.logger.error(msg)
>                      return False
> -                console = ET.SubElement(devices, 'console', {'type': serial_type})
> -                ET.SubElement(console, 'target', {'type': 'serial', 'port': '0'})
> +                console = ET.SubElement(
> +                    devices, 'console', {'type': serial_type})
> +                ET.SubElement(
> +                    console, 'target', {'type': 'serial', 'port': '0'})
> 
>      def add_vm_login(self, **options):
>          """
> @@ -396,67 +450,201 @@ class LibvirtKvm(VirtBase):
>              bus = m.group(1)
>              slot = m.group(2)
>              func = m.group(3)
> -            dom  = '0'
> +            dom = '0'
>              return (bus, slot, func, dom)
>          m = re.match(pci_regex_domain, pci_address)
>          if m is not None:
>              bus = m.group(2)
>              slot = m.group(3)
>              func = m.group(4)
> -            dom  = m.group(1)
> +            dom = m.group(1)
>              return (bus, slot, func, dom)
>          return None
> 
>      def set_vm_device(self, driver='pci-assign', **opts):
> +        opts['driver'] = driver
>          self.add_vm_device(**opts)
> 
> -    def add_vm_device(self, **options):
> +    def __generate_net_config_script(self, switch=DEFAULT_BRIDGE):
>          """
> -        options:
> -            pf_idx: device index of pass-through device
> -            guestpci: assigned pci address in vm
> +        Generate a script for qemu emulator to build a tap device
> +        between host and guest.
>          """
> -        devices = self.domain.find('devices')
> -        hostdevice = ET.SubElement(devices, 'hostdev', {
> -                                   'mode': 'subsystem', 'type': 'pci',
> -                                   'managed': 'yes'})
> +        qemu_ifup = self.QEMU_IFUP % {'switch': switch}
> +        file_name = os.path.basename(self.QEMU_IFUP_PATH)
> +        tmp_file_path = '/tmp/%s' % file_name
> +        self.host_dut.create_file(qemu_ifup, tmp_file_path)
> +        self.host_session.send_expect(
> +            'mv -f ~/%s %s' % (file_name, self.QEMU_IFUP_PATH), '# ')
> +        self.host_session.send_expect(
> +            'chmod +x %s' % self.QEMU_IFUP_PATH, '# ')
> 
> -        if 'opt_host' in options.keys():
> -            pci_addr = options['opt_host']
> -        else:
> -            print utils.RED("Missing opt_host for device option!!!")
> -            return False
> +    def __parse_opt_setting(self, opt_settings):
> +        if '=' not in opt_settings:
> +            msg = 'wrong opt_settings setting'
> +            raise Exception(msg)
> +        setting = [item.split('=') for item in opt_settings.split(',')]
> +        return dict(setting)
> 
> +    def __get_pci_addr_config(self, pci):
> +        pci = self.__parse_pci(pci)
> +        if pci is None:
> +            msg = 'Invalid guestpci for host device pass-through !!!'
> +            self.logger.error(msg)
> +            return False
> +        bus, slot, func, dom = pci
> +        config = {
> +            'type': 'pci', 'domain': '0x%s' % dom, 'bus': '0x%s' % bus,
> +            'slot': '0x%s' % slot, 'function': '0x%s' % func}
> +        return config
> +
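
For a hypothetical host PCI address '0000:af:00.0', the helper above
yields the following address attributes (a standalone sketch of the
same formatting):

    bus, slot, func, dom = ('af', '00', '0', '0000')
    config = {'type': 'pci', 'domain': '0x%s' % dom, 'bus': '0x%s' % bus,
              'slot': '0x%s' % slot, 'function': '0x%s' % func}
    # config == {'type': 'pci', 'domain': '0x0000', 'bus': '0xaf',
    #            'slot': '0x00', 'function': '0x0'}
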
> +    def __write_config(self, parent, configs):
> +        for config in configs:
> +            node_name = config[0]
> +            opt = config[1]
> +            node = ET.SubElement(parent, node_name, opt)
> +            if len(config) == 3:
> +                self.__write_config(node, config[2])
> +
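
__write_config consumes a recursive [tag, attributes, optional child
list] structure; a self-contained sketch of the same walk (the element
values below are made up):

    import xml.etree.ElementTree as ET

    def write_config(parent, configs):
        # create one element per entry, then recurse into child lists
        for entry in configs:
            node = ET.SubElement(parent, entry[0], entry[1])
            if len(entry) == 3:
                write_config(node, entry[2])

    devices = ET.Element('devices')
    config = [['interface', {'type': 'bridge'},
               [['mac', {'address': '00:11:22:33:44:55'}],
                ['source', {'bridge': 'br0'}]]]]
    write_config(devices, config)
    # ET.tostring(devices) ->
    # <devices><interface type="bridge"><mac address="00:11:22:33:44:55" />
    # <source bridge="br0" /></interface></devices>
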
> +    def __set_vm_bridge_interface(self, **options):
> +        mac = options.get('opt_mac')
> +        opt_br = options.get('opt_br')
> +        if not mac or not opt_br:
> +            msg = "Missing some bridge device option !!!"
> +            self.logger.error(msg)
> +            return False
> +        _config = [
> +            ['mac', {'address': mac}],
> +            ['source', {'bridge': opt_br, }],
> +            ['model', {'type': 'virtio', }]]
> +        config = [['interface', {'type': 'bridge'}, _config]]
> +        # set xml file
> +        parent = self.domain.find('devices')
> +        self.__write_config(parent, config)
> +
> +    def __add_vm_virtio_user_pci(self, **options):
> +        mac = options.get('opt_mac')
> +        mode = options.get('opt_server') or 'client'
> +        # unix socket path of character device
> +        sock_path = options.get('opt_path')
> +        queue = options.get('opt_queue')
> +        settings = options.get('opt_settings')
> +        # pci address in virtual machine
> +        pci = options.get('opt_host')
> +        if not mac or not sock_path:
> +            msg = "Missing some vhostuser device option !!!"
> +            self.logger.error(msg)
> +            return False
> +        node_name = 'interface'
> +        # basic options
> +        _config = [
> +            ['mac', {'address': mac}],
> +            ['source', {'type': 'unix',
> +                        'path': sock_path,
> +                        'mode': mode, }],
> +            ['model', {'type': 'virtio', }]]
> +        # append pci address
> +        if pci:
> +            _config.append(['address', self.__get_pci_addr_config(pci)])
> +        if queue or settings:
> +            drv_config = {'name': 'vhost'}
> +            # initialize here so the check below works when only
> +            # opt_queue is set and opt_settings is absent
> +            sub_drv_config = []
> +            if settings:
> +                _sub_opt = self.__parse_opt_setting(settings)
> +                drv_opt = {}
> +                guest_opt = {}
> +                host_opt = {}
> +                for key, value in _sub_opt.iteritems():
> +                    if key.startswith('host_'):
> +                        host_opt[key[5:]] = value
> +                        continue
> +                    if key.startswith('guest_'):
> +                        guest_opt[key[6:]] = value
> +                        continue
> +                    drv_opt[key] = value
> +                drv_config.update(drv_opt)
> +                if host_opt:
> +                    sub_drv_config.append(['host', host_opt])
> +                if guest_opt:
> +                    sub_drv_config.append(['guest', guest_opt])
> +            # The optional queues attribute controls the number of queues
> +            # to be used for either Multiqueue virtio-net or vhost-user
> +            # network interfaces. Each queue will potentially be handled
> +            # by a different processor, resulting in much higher
> +            # throughput. virtio-net since 1.0.6 (QEMU and KVM only),
> +            # vhost-user since 1.2.17 (QEMU and KVM only).
> +            if queue:
> +                drv_config.update({'queues': queue, })
> +            # set driver config
> +            if sub_drv_config:
> +                _config.append(['driver', drv_config, sub_drv_config])
> +            else:
> +                _config.append(['driver', drv_config])
> +        config = [[node_name, {'type': 'vhostuser'}, _config]]
> +        # set xml file
> +        parent = self.domain.find('devices')
> +        self.__write_config(parent, config)
> 
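
The host_/guest_ prefix convention in opt_settings maps settings onto
the <driver>, <host> and <guest> elements respectively; a standalone
sketch of the grouping, with hypothetical keys:

    _sub_opt = {'host_mrg_rxbuf': 'on', 'guest_csum': 'off',
                'ioeventfd': 'on'}
    drv_opt, host_opt, guest_opt = {}, {}, {}
    for key, value in _sub_opt.items():
        if key.startswith('host_'):
            host_opt[key[5:]] = value      # -> <host mrg_rxbuf="on"/>
        elif key.startswith('guest_'):
            guest_opt[key[6:]] = value     # -> <guest csum="off"/>
        else:
            drv_opt[key] = value           # stays on <driver .../>
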
> +    def __add_vm_pci_assign(self, **options):
> +        devices = self.domain.find('devices')
> +        # add hostdev config block
> +        config = {
> +            'mode': 'subsystem',
> +            'type': 'pci',
> +            'managed': 'yes'}
> +        hostdevice = ET.SubElement(devices, 'hostdev', config)
> +        # add hostdev/source config block
> +        pci_addr = options.get('opt_host')
> +        if not pci_addr:
> +            msg = "Missing opt_host for device option!!!"
> +            self.logger.error(msg)
> +            return False
>          pci = self.__parse_pci(pci_addr)
>          if pci is None:
>              return False
>          bus, slot, func, dom = pci
> -
>          source = ET.SubElement(hostdevice, 'source')
> -        ET.SubElement(source, 'address', {
> -                      'domain': '0x%s' % dom, 'bus': '0x%s' % bus,
> -                      'slot': '0x%s' % slot,
> -                      'function': '0x%s' % func})
> -        if 'guestpci' in options.keys():
> -            guest_pci_addr = options['guestpci']
> -        else:
> +        config = {
> +            'domain': '0x%s' % dom,
> +            'bus': '0x%s' % bus,
> +            'slot': '0x%s' % slot,
> +            'function': '0x%s' % func}
> +        ET.SubElement(source, 'address', config)
> +        # add hostdev/source/address config block
> +        guest_pci_addr = options.get('guestpci')
> +        if not guest_pci_addr:
>              guest_pci_addr = '0000:%s:00.0' % hex(self.pciindex)[2:]
>              self.pciindex += 1
> -        pci = self.__parse_pci(guest_pci_addr)
> -        if pci is None:
> -            print utils.RED('Invalid guestpci for host device pass-through!!!')
> -            return False
> -        bus, slot, func, dom = pci
> -        ET.SubElement(hostdevice, 'address', {
> -              'type': 'pci', 'domain': '0x%s' % dom, 'bus': '0x%s' % bus,
> -              'slot': '0x%s' % slot, 'function': '0x%s' % func})
> +        config = self.__get_pci_addr_config(guest_pci_addr)
> +        ET.SubElement(hostdevice, 'address', config)
>          # save host and guest pci address mapping
>          pci_map = {}
>          pci_map['hostpci'] = pci_addr
>          pci_map['guestpci'] = guest_pci_addr
>          self.pci_maps.append(pci_map)
> 
> +    def add_vm_device(self, **options):
> +        """
> +        options:
> +            pf_idx: device index of pass-through device
> +            guestpci: assigned pci address in vm
> +        """
> +        driver_table = {
> +            'vhost-user':
> +                self.__add_vm_virtio_user_pci,
> +            'bridge':
> +                self.__set_vm_bridge_interface,
> +            'pci-assign':
> +                self.__add_vm_pci_assign,
> +        }
> +        driver = options.get('driver')
> +        if not driver or driver not in driver_table.keys():
> +            driver = 'pci-assign'
> +            msg = 'use {0} configuration as default driver'.format(driver)
> +            self.logger.warning(msg)
> +        func = driver_table.get(driver)
> +        func(**options)
> +
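
A usage sketch of the dispatch above; the instance and all option
values are hypothetical:

    # vm is assumed to be an already-initialized LibvirtKvm instance
    vm.add_vm_device(driver='vhost-user',
                     opt_mac='52:54:00:00:00:01',
                     opt_path='/tmp/vhost-user0.sock',
                     opt_queue='2',
                     opt_settings='host_mrg_rxbuf=on,guest_csum=off')
    # an unknown or missing driver falls back to pci-assign with a warning
    vm.add_vm_device(opt_host='0000:af:00.0')
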
>      def add_vm_net(self, **options):
>          """
>          Options:
> @@ -465,6 +653,8 @@ class LibvirtKvm(VirtBase):
>          if 'type' in options.keys():
>              if options['type'] == 'nic':
>                  self.__add_vm_net_nic(**options)
> +            elif options['type'] == 'tap':
> +                self.__add_vm_net_tap(**options)
> 
>      def __add_vm_net_nic(self, **options):
>          """
> @@ -506,6 +696,30 @@ class LibvirtKvm(VirtBase):
>              ET.SubElement(qemu, 'qemu:arg', {'value': 'user,hostfwd='
>                                               'tcp:%s:%d-:22' % (dut_ip, port)})
> 
> +    def __add_vm_net_tap(self, **options):
> +        """
> +        type: tap
> +        opt_br: bridge name
> +            note: if not specified, the default bridge br0 is used.
> +        opt_script: network configure script path
> +            note: if not specified, default is self.QEMU_IFUP_PATH.
> +        """
> +        _config = [['target', {'dev': 'tap0'}]]
> +        # add bridge info
> +        opt_br = options.get('opt_br')
> +        bridge = opt_br if opt_br else self.DEFAULT_BRIDGE
> +        _config.append(['source', {'bridge': bridge}])
> +        self.__generate_net_config_script(str(bridge))
> +        # add network configure script path
> +        opt_script = options.get('opt_script')
> +        script_path = opt_script if opt_script else self.QEMU_IFUP_PATH
> +        _config.append(['script', {'path': script_path}])
> +        config = [['interface', {'type': 'bridge'}, _config]]
> +        # set xml file
> +        parent = self.domain.find('devices')
> +        self.__write_config(parent, config)
> +
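
A usage sketch through the public add_vm_net entry point; the bridge
name and the vm instance are hypothetical:

    # generates the ifup script for br1 and attaches tap0 to it
    vm.add_vm_net(type='tap', opt_br='br1')
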
>      def add_vm_virtio_serial_channel(self, **options):
>          """
>          Options:
> @@ -516,7 +730,8 @@ class LibvirtKvm(VirtBase):
>          channel = ET.SubElement(devices, 'channel', {'type': 'unix'})
>          for opt in ['path', 'name']:
>              if opt not in options.keys():
> -                print "invalid virtio serial channel setting"
> +                msg = "invalid virtio serial channel setting"
> +                self.logger.error(msg)
>                  return
> 
>          ET.SubElement(
> @@ -570,11 +785,15 @@ class LibvirtKvm(VirtBase):
> 
>      def _start_vm(self):
>          xml_file = "/tmp/%s.xml" % self.vm_name
> -        try:
> +        if os.path.exists(xml_file):
>              os.remove(xml_file)
> -        except:
> -            pass
>          self.root.write(xml_file)
> +        with open(xml_file, 'rb') as fp:
> +            content = fp.read()
> +        doc = minidom.parseString(content)
> +        vm_content = doc.toprettyxml(indent='    ')
> +        with open(xml_file, 'wb') as fp:
> +            fp.write(vm_content)
>          self.host_session.copy_file_to(xml_file)
>          time.sleep(2)
> 
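
The pretty-printing step added to _start_vm can be exercised on its
own; a minimal sketch with a made-up domain document:

    from xml.dom import minidom

    content = '<domain><name>vm0</name></domain>'
    print(minidom.parseString(content).toprettyxml(indent='    '))
    # <?xml version="1.0" ?>
    # <domain>
    #     <name>vm0</name>
    # </domain>
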
> --
> 1.9.3


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2019-08-28  6:09 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-08-19  4:57 [dts] [PATCH V1]framework/qemu_libvirt: add new features yufengmx
2019-08-19  4:57 ` yufengmx
2019-08-19  7:25   ` Wang, Yinan
2019-08-28  6:09   ` Tu, Lijuan
2019-08-19  7:26 ` Wang, Yinan
