Coverage for nova/virt/libvirt/guest.py: 94%
383 statements
coverage.py v7.6.12, created at 2025-04-24 11:16 +0000
1# Copyright 2010 United States Government as represented by the
2# Administrator of the National Aeronautics and Space Administration.
3# All Rights Reserved.
4# Copyright (c) 2010 Citrix Systems, Inc.
5# Copyright (c) 2011 Piston Cloud Computing, Inc
6# Copyright (c) 2012 University Of Minho
7# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
8# Copyright (c) 2015 Red Hat, Inc
9#
10# Licensed under the Apache License, Version 2.0 (the "License"); you may
11# not use this file except in compliance with the License. You may obtain
12# a copy of the License at
13#
14# http://www.apache.org/licenses/LICENSE-2.0
15#
16# Unless required by applicable law or agreed to in writing, software
17# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
18# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
19# License for the specific language governing permissions and limitations
20# under the License.
22"""
23Manages information about the guest.
25This class encapsulates a libvirt domain and provides certain
26higher-level APIs around the raw libvirt API. These APIs are
27then used by all the other libvirt-related classes.
28"""
30import time
31import typing as ty
33from lxml import etree
34from oslo_log import log as logging
35from oslo_utils import encodeutils
36from oslo_utils import excutils
37from oslo_utils import importutils
39from nova.compute import power_state
40from nova import exception
41from nova.i18n import _
42from nova.virt import hardware
43from nova.virt.libvirt import config as vconfig
46if ty.TYPE_CHECKING: 46 ↛ 47 (line 46 didn't jump to line 47 because the condition on line 46 was never true)
47 import libvirt
48else:
49 libvirt = None
51try:
52 import libvirtmod_qemu
53except ImportError:
54 libvirtmod_qemu = None
57LOG = logging.getLogger(__name__)
59VIR_DOMAIN_NOSTATE = 0
60VIR_DOMAIN_RUNNING = 1
61VIR_DOMAIN_BLOCKED = 2
62VIR_DOMAIN_PAUSED = 3
63VIR_DOMAIN_SHUTDOWN = 4
64VIR_DOMAIN_SHUTOFF = 5
65VIR_DOMAIN_CRASHED = 6
66VIR_DOMAIN_PMSUSPENDED = 7
68LIBVIRT_POWER_STATE = {
69 VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
70 VIR_DOMAIN_RUNNING: power_state.RUNNING,
71 # The DOMAIN_BLOCKED state is only valid in Xen. It means that
72 # the VM is running and the vCPU is idle. So, we map it to RUNNING
73 VIR_DOMAIN_BLOCKED: power_state.RUNNING,
74 VIR_DOMAIN_PAUSED: power_state.PAUSED,
75 # The libvirt API doc says that DOMAIN_SHUTDOWN means the domain
76 # is being shut down. So technically the domain is still
77 # running. SHUTOFF is the real powered off state. But we will map
78 # both to SHUTDOWN anyway.
79 # http://libvirt.org/html/libvirt-libvirt.html
80 VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
81 VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
82 VIR_DOMAIN_CRASHED: power_state.CRASHED,
83 VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
84}
86# https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainBlockJobType
87VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN = 0
88VIR_DOMAIN_BLOCK_JOB_TYPE_PULL = 1
89VIR_DOMAIN_BLOCK_JOB_TYPE_COPY = 2
90VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT = 3
91VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT = 4
92VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP = 5
93VIR_DOMAIN_BLOCK_JOB_TYPE_LAST = 6
95LIBVIRT_BLOCK_JOB_TYPE = {
96 VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN: 'UNKNOWN',
97 VIR_DOMAIN_BLOCK_JOB_TYPE_PULL: 'PULL',
98 VIR_DOMAIN_BLOCK_JOB_TYPE_COPY: 'COPY',
99 VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT: 'COMMIT',
100 VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT: 'ACTIVE_COMMIT',
101 VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP: 'BACKUP',
102 VIR_DOMAIN_BLOCK_JOB_TYPE_LAST: 'LAST',
103}
106class Guest(object):
108 def __init__(self, domain):
110 global libvirt
111 if libvirt is None: 111 ↛ 112 (line 111 didn't jump to line 112 because the condition on line 111 was never true)
112 libvirt = importutils.import_module('libvirt')
114 self._domain = domain
116 def __repr__(self):
117 return "<Guest %(id)d %(name)s %(uuid)s>" % {
118 'id': self.id,
119 'name': self.name,
120 'uuid': self.uuid
121 }
123 @property
124 def id(self):
125 return self._domain.ID()
127 @property
128 def uuid(self):
129 return self._domain.UUIDString()
131 @property
132 def name(self):
133 return self._domain.name()
135 @property
136 def _encoded_xml(self):
137 return encodeutils.safe_decode(self._domain.XMLDesc(0))
139 @classmethod
140 def create(cls, xml, host):
141 """Create a new Guest
143 :param xml: XML definition of the domain to create
144 :param host: host.Host connection to define the guest on
146 :returns guest.Guest: Guest ready to be launched
147 """
148 try:
149 if isinstance(xml, bytes): 149 ↛ 150 (line 149 didn't jump to line 150 because the condition on line 149 was never true)
150 xml = xml.decode('utf-8')
151 guest = host.write_instance_config(xml)
152 except Exception:
153 with excutils.save_and_reraise_exception():
154 LOG.error('Error defining a guest with XML: %s',
155 encodeutils.safe_decode(xml))
156 return guest
158 def launch(self, pause=False):
159 """Starts a created guest.
161 :param pause: Indicates whether to start and pause the guest
162 """
163 flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
164 try:
165 return self._domain.createWithFlags(flags)
166 except Exception:
167 with excutils.save_and_reraise_exception():
168 LOG.exception('Error launching a defined domain with XML: %s',
169 self._encoded_xml, errors='ignore')
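    # Example (sketch): the typical define-then-start flow. `host` is
    # assumed to be a nova.virt.libvirt.host.Host connection and `xml` a
    # complete domain definition; both names are hypothetical.
    #
    #     guest = Guest.create(xml, host)
    #     guest.launch(pause=False)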
171 def poweroff(self):
172 """Stops a running guest."""
173 self._domain.destroy()
175 def sync_guest_time(self):
176 """Try to set VM time to the current value. This is typically useful
177 when the clock wasn't running on the VM for some time (e.g. during
178 suspension or migration), especially if the time delay exceeds NTP
179 tolerance.
181 It is not guaranteed that the time is actually set (it depends on guest
182 environment, especially QEMU agent presence) or that the set time is
183 very precise (NTP in the guest should take care of it if needed).
184 """
185 t = time.time()
186 seconds = int(t)
187 nseconds = int((t - seconds) * 10 ** 9)
188 try:
189 self._domain.setTime(time={'seconds': seconds,
190 'nseconds': nseconds})
191 except libvirt.libvirtError as e:
192 code = e.get_error_code()
193 if code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE: 193 ↛ 194 (line 193 didn't jump to line 194 because the condition on line 193 was never true)
194 LOG.debug('Failed to set time: QEMU agent unresponsive',
195 instance_uuid=self.uuid)
196 elif code == libvirt.VIR_ERR_OPERATION_UNSUPPORTED: 196 ↛ 197 (line 196 didn't jump to line 197 because the condition on line 196 was never true)
197 LOG.debug('Failed to set time: not supported',
198 instance_uuid=self.uuid)
199 elif code == libvirt.VIR_ERR_ARGUMENT_UNSUPPORTED: 199 ↛ 200 (line 199 didn't jump to line 200 because the condition on line 199 was never true)
200 LOG.debug('Failed to set time: agent not configured',
201 instance_uuid=self.uuid)
202 else:
203 LOG.warning('Failed to set time: %(reason)s',
204 {'reason': e}, instance_uuid=self.uuid)
205 except Exception as ex:
206 # The highest priority is not to let this method crash and thus
207 # disrupt its caller in any way. So we swallow this error here,
208 # to be absolutely safe.
209 LOG.debug('Failed to set time: %(reason)s',
210 {'reason': ex}, instance_uuid=self.uuid)
211 else:
212 LOG.debug('Time updated to: %d.%09d', seconds, nseconds,
213 instance_uuid=self.uuid)
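    # Example (sketch): resynchronizing the guest clock after a resume;
    # sync_guest_time() swallows its own errors, so no handling is needed.
    #
    #     guest.resume()
    #     guest.sync_guest_time()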
215 def inject_nmi(self):
216 """Injects an NMI to a guest."""
217 self._domain.injectNMI()
219 def resume(self):
220 """Resumes a paused guest."""
221 self._domain.resume()
223 def get_interfaces(self):
224 """Returns a list of all network interfaces for this domain."""
225 doc = None
227 try:
228 doc = etree.fromstring(self._encoded_xml)
229 except Exception:
230 return []
232 interfaces = []
234 nodes = doc.findall('./devices/interface/target')
235 for target in nodes:
236 interfaces.append(target.get('dev'))
238 return interfaces
240 def get_interface_by_cfg(
241 self,
242 cfg: vconfig.LibvirtConfigGuestDevice,
243 from_persistent_config: bool = False
244 ) -> ty.Optional[vconfig.LibvirtConfigGuestDevice]:
245 """Lookup a full LibvirtConfigGuestDevice with
246 LibvirtConfigGuesDevice generated
247 by nova.virt.libvirt.vif.get_config.
249 :param cfg: config object that represents the guest interface.
250 :param from_persistent_config: query the device from the persistent
251 domain instead of the live domain configuration
252 :returns: nova.virt.libvirt.config.LibvirtConfigGuestDevice instance
253 if found, else None
254 """
256 if cfg:
257 LOG.debug(f'looking for interface given config: {cfg}')
258 interfaces = self.get_all_devices(
259 type(cfg), from_persistent_config)
260 if not interfaces:
261 LOG.debug(f'No interface of type: {type(cfg)} found in domain')
262 return None
263 # FIXME(sean-k-mooney): we should be able to print the list of
264 # interfaces however some tests use incomplete objects that can't
265 # be printed due to incomplete mocks or defects in the libvirt
266 # fixture. Lets address this later.
267 # LOG.debug(f'within interfaces: {list(interfaces)}')
268 for interface in interfaces:
269 # NOTE(leehom) LibvirtConfigGuest get from domain and
270 # LibvirtConfigGuest generated by
271 # nova.virt.libvirt.vif.get_config must be identical.
272 # NOTE(gibi): LibvirtConfigGuest subtypes does a custom
273 # equality check based on available information on nova side
274 if cfg == interface:
275 return interface
276 else:
277 # NOTE(sean-k-mooney): {list(interfaces)} could be used
278 # instead of self._domain.XMLDesc(0) once all tests have
279 # printable interfaces see the comment above ^.
280 # While the XML is more verbose it should always work
281 # for our current test suite and in production code.
282 LOG.debug(
283 f'interface for config: {cfg}'
284 f' not found in domain: {self._domain.XMLDesc(0)}'
285 )
286 return None
288 def get_vcpus_info(self):
289 """Returns virtual cpus information of guest.
291 :returns: guest.VCPUInfo
292 """
293 vcpus = self._domain.vcpus()
294 for vcpu in vcpus[0]:
295 yield VCPUInfo(
296 id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])
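    # Example (sketch): consuming the generator to log vCPU placement.
    #
    #     for vcpu in guest.get_vcpus_info():
    #         LOG.debug("vcpu %d on host cpu %d (state %d, time %d ns)",
    #                   vcpu.id, vcpu.cpu, vcpu.state, vcpu.time)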
298 def delete_configuration(self):
299 """Undefines a domain from hypervisor."""
300 try:
301 flags = libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE
302 flags |= libvirt.VIR_DOMAIN_UNDEFINE_NVRAM
303 self._domain.undefineFlags(flags)
304 except libvirt.libvirtError:
305 LOG.debug("Error from libvirt during undefineFlags for guest "
306 "%d. Retrying with undefine", self.id)
307 self._domain.undefine()
308 except AttributeError:
309 # Older versions of libvirt don't support undefine flags,
310 # trying to remove managed image
311 try:
312 if self._domain.hasManagedSaveImage(0): 312 ↛ 316 (line 312 didn't jump to line 316 because the condition on line 312 was always true)
313 self._domain.managedSaveRemove(0)
314 except AttributeError:
315 pass
316 self._domain.undefine()
318 def has_persistent_configuration(self):
319 """Whether domain config is persistently stored on the host."""
320 return self._domain.isPersistent()
322 def attach_device(self, conf, persistent=False, live=False):
323 """Attaches device to the guest.
325 :param conf: A LibvirtConfigObject of the device to attach
326 :param persistent: A bool to indicate whether the change is
327 persistent or not
328 :param live: A bool to indicate whether it affects the guest
329 in the running state
330 """
331 flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
332 flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
334 device_xml = conf.to_xml()
336 LOG.debug("attach device xml: %s", device_xml)
337 self._domain.attachDeviceFlags(device_xml, flags=flags)
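    # Example (sketch): attaching a device to the live domain and to the
    # persistent config so it survives reboots. `disk_conf` is a
    # hypothetical vconfig.LibvirtConfigGuestDisk built elsewhere.
    #
    #     guest.attach_device(disk_conf, persistent=True, live=True)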
339 def set_metadata(self, metadata, persistent=False, live=False):
340 """Set metadata to the guest.
342 Please note that this function completely replaces the existing
343 metadata. The scope of the replacement is limited to the Nova-specific
344 XML Namespace.
346 :param metadata: A LibvirtConfigGuestMetaNovaInstance
347 :param persistent: A bool to indicate whether the change is
348 persistent or not
349 :param live: A bool to indicate whether it affects the guest
350 in the running state
351 """
352 flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
353 flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
355 metadata_xml = metadata.to_xml()
356 LOG.debug("set metadata xml: %s", metadata_xml)
357 self._domain.setMetadata(libvirt.VIR_DOMAIN_METADATA_ELEMENT,
358 metadata_xml, "instance",
359 vconfig.NOVA_NS, flags=flags)
361 def get_config(self):
362 """Returns the config instance for a guest
364 :returns: LibvirtConfigGuest instance
365 """
366 config = vconfig.LibvirtConfigGuest()
367 config.parse_str(self._domain.XMLDesc(0))
368 return config
370 def get_disk(
371 self,
372 device: str,
373 from_persistent_config: bool = False
374 ) -> ty.Optional[vconfig.LibvirtConfigGuestDisk]:
375 """Returns the disk mounted at device
377 :param device: the name of either the source or the target device
378 :param from_persistent_config: query the device from the persistent
379 domain (i.e. inactive XML configuration that'll be used on next
380 start of the domain) instead of the live domain configuration
381 :returns LibvirtConfigGuestDisk: mounted at device or None
382 """
383 flags = 0
384 if from_persistent_config:
385 flags |= libvirt.VIR_DOMAIN_XML_INACTIVE
386 try:
387 doc = etree.fromstring(self._domain.XMLDesc(flags))
388 except Exception:
389 return None
391 # FIXME(lyarwood): Workaround for the device being either a target dev
392 # when called via swap_volume or source file when called via
393 # live_snapshot. This should be removed once both are refactored to use
394 # only the target dev of the device.
395 node = doc.find("./devices/disk/target[@dev='%s'].." % device)
396 if node is None:
397 node = doc.find("./devices/disk/source[@file='%s'].." % device)
399 if node is not None:
400 conf = vconfig.LibvirtConfigGuestDisk()
401 conf.parse_dom(node)
402 return conf
404 return None
406 def get_all_disks(self):
407 """Returns all the disks for a guest
409 :returns: a list of LibvirtConfigGuestDisk instances
410 """
412 return self.get_all_devices(vconfig.LibvirtConfigGuestDisk)
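    # Example (sketch): fetching one disk by its target dev and falling back
    # to listing everything; 'vda' is a hypothetical device name.
    #
    #     disk = guest.get_disk('vda')
    #     if disk is None:
    #         disks = guest.get_all_disks()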
414 def get_device_by_alias(self, devalias, devtype=None,
415 from_persistent_config=False):
416 for dev in self.get_all_devices(devtype):
417 if hasattr(dev, 'alias') and dev.alias == devalias:
418 return dev
420 def get_all_devices(
421 self,
422 devtype: vconfig.LibvirtConfigGuestDevice = None,
423 from_persistent_config: bool = False
424 ) -> ty.List[vconfig.LibvirtConfigGuestDevice]:
425 """Returns all devices for a guest
427 :param devtype: a LibvirtConfigGuestDevice subclass
428 :param from_persistent_config: query the device from the persistent
429 domain (i.e. inactive XML configuration that'll be used on next
430 start of the domain) instead of the live domain configuration
431 :returns: a list of LibvirtConfigGuestDevice instances
432 """
434 flags = 0
435 if from_persistent_config:
436 flags |= libvirt.VIR_DOMAIN_XML_INACTIVE
438 try:
439 config = vconfig.LibvirtConfigGuest()
440 config.parse_str(
441 self._domain.XMLDesc(flags))
442 except Exception:
443 return []
445 devs = []
446 for dev in config.devices:
447 if (devtype is None or
448 isinstance(dev, devtype)):
449 devs.append(dev)
450 return devs
452 def detach_device(self, conf, persistent=False, live=False):
453 """Detaches device to the guest.
455 :param conf: A LibvirtConfigObject of the device to detach
456 :param persistent: A bool to indicate whether the change is
457 persistent or not
458 :param live: A bool to indicate whether it affects the guest
459 in the running state
460 """
461 flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
462 flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
464 device_xml = conf.to_xml()
466 LOG.debug("detach device xml: %s", device_xml)
467 self._domain.detachDeviceFlags(device_xml, flags=flags)
469 def get_xml_desc(self, dump_inactive=False, dump_sensitive=False,
470 dump_migratable=False):
471 """Returns xml description of guest.
473 :param dump_inactive: Dump inactive domain information
474 :param dump_sensitive: Dump security sensitive information
475 :param dump_migratable: Dump XML suitable for migration
477 :returns string: XML description of the guest
478 """
479 flags = dump_inactive and libvirt.VIR_DOMAIN_XML_INACTIVE or 0
480 flags |= dump_sensitive and libvirt.VIR_DOMAIN_XML_SECURE or 0
481 flags |= dump_migratable and libvirt.VIR_DOMAIN_XML_MIGRATABLE or 0
482 return self._domain.XMLDesc(flags=flags)
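    # Example (sketch): dumping the inactive (next-boot) definition without
    # security-sensitive data, e.g. for safe logging.
    #
    #     xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=False)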
484 def save_memory_state(self):
485 """Saves the domain's memory state. Requires running domain.
487 :raises: libvirtError on error
488 """
489 self._domain.managedSave(0)
491 def get_block_device(self, disk):
492 """Returns a block device wrapper for disk."""
493 return BlockDevice(self, disk)
495 def set_user_password(self, user, new_pass):
496 """Configures a new user password."""
497 self._domain.setUserPassword(user, new_pass, 0)
499 def _get_domain_info(self):
500 """Returns information on Guest.
502 :returns list: [state, maxMem, memory, nrVirtCpu, cpuTime]
503 """
504 return self._domain.info()
506 def get_info(self, host):
507 """Retrieve information from libvirt for a specific instance name.
509 If a libvirt error is encountered during lookup, we might raise a
510 NotFound exception or Error exception depending on how severe the
511 libvirt error is.
513 :returns hardware.InstanceInfo:
514 """
515 try:
516 dom_info = self._get_domain_info()
517 except libvirt.libvirtError as ex:
518 error_code = ex.get_error_code()
519 if error_code == libvirt.VIR_ERR_NO_DOMAIN: 519 ↛ 522 (line 519 didn't jump to line 522 because the condition on line 519 was always true)
520 raise exception.InstanceNotFound(instance_id=self.uuid)
522 msg = (_('Error from libvirt while getting domain info for '
523 '%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %
524 {'instance_name': self.name,
525 'error_code': error_code,
526 'ex': ex})
527 raise exception.InternalError(msg)
529 return hardware.InstanceInfo(
530 state=LIBVIRT_POWER_STATE[dom_info[0]],
531 internal_id=self.id)
533 def get_power_state(self, host):
534 return self.get_info(host).state
536 def is_active(self):
537 "Determines whether guest is currently running."
538 return self._domain.isActive()
540 def freeze_filesystems(self):
541 """Freeze filesystems within guest."""
542 self._domain.fsFreeze()
544 def thaw_filesystems(self):
545 """Thaw filesystems within guest."""
546 self._domain.fsThaw()
548 def snapshot(self, conf, no_metadata=False,
549 disk_only=False, reuse_ext=False, quiesce=False):
550 """Creates a guest snapshot.
552 :param conf: vconfig.LibvirtConfigGuestSnapshotDisk
553 :param no_metadata: Make snapshot without remembering it
554 :param disk_only: Disk snapshot, no system checkpoint
555 :param reuse_ext: Reuse any existing external files
556 :param quiesce: Use QGA to quiesce all mounted file systems
557 """
558 flags = no_metadata and (
559 libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA or 0)
560 flags |= disk_only and (
561 libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY or 0)
562 flags |= reuse_ext and (
563 libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT or 0)
564 flags |= quiesce and libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE or 0
566 device_xml = conf.to_xml()
568 self._domain.snapshotCreateXML(device_xml, flags=flags)
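    # Example (sketch): an external, disk-only snapshot quiesced through the
    # QEMU guest agent. `snap_conf` is a hypothetical snapshot config object
    # (see :param conf: above) built by the driver.
    #
    #     guest.snapshot(snap_conf, no_metadata=True, disk_only=True,
    #                    reuse_ext=True, quiesce=True)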
570 def shutdown(self):
571 """Shutdown guest"""
572 self._domain.shutdown()
574 def pause(self):
575 """Suspends an active guest
577 Process is frozen without further access to CPU resources and
578 I/O but the memory used by the domain at the hypervisor level
579 will stay allocated.
581 See method "resume()" to reactivate the guest.
582 """
583 self._domain.suspend()
585 def migrate(self, destination, migrate_uri=None, migrate_disks=None,
586 destination_xml=None, flags=0, bandwidth=0):
587 """Migrate guest object from its current host to the destination
589 :param destination: URI of the destination host the guest will be migrated to
590 :param migrate_uri: URI for invoking the migration
591 :param migrate_disks: List of disks to be migrated
592 :param destination_xml: The guest XML to be used on the target host
593 :param flags: May be one or more of the following:
594 VIR_MIGRATE_LIVE Do not pause the VM during migration
595 VIR_MIGRATE_PEER2PEER Direct connection between source &
596 destination hosts
597 VIR_MIGRATE_TUNNELLED Tunnel migration data over the
598 libvirt RPC channel
599 VIR_MIGRATE_PERSIST_DEST If the migration is successful,
600 persist the domain on the
601 destination host.
602 VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful,
603 undefine the domain on the
604 source host.
605 VIR_MIGRATE_NON_SHARED_INC Migration with non-shared
606 storage with incremental disk
607 copy
608 VIR_MIGRATE_AUTO_CONVERGE Slow down domain to make sure it does
609 not change its memory faster than a
610 hypervisor can transfer the changed
611 memory to the destination host
612 VIR_MIGRATE_POSTCOPY Tell libvirt to enable post-copy migration
613 VIR_MIGRATE_TLS Use QEMU-native TLS
614 :param bandwidth: The maximum bandwidth in MiB/s
615 """
616 params = {}
617 # In migrateToURI3 these parameters are extracted from the
618 # `params` dict
619 params['bandwidth'] = bandwidth
621 if destination_xml:
622 params['destination_xml'] = destination_xml
623 params['persistent_xml'] = destination_xml
624 if migrate_disks:
625 params['migrate_disks'] = migrate_disks
626 if migrate_uri:
627 params['migrate_uri'] = migrate_uri
629 # Due to a quirk in the libvirt python bindings,
630 # VIR_MIGRATE_NON_SHARED_INC with an empty migrate_disks is
631 # interpreted as "block migrate all writable disks" rather than
632 # "don't block migrate any disks". This includes attached
633 # volumes, which will potentially corrupt data on those
634 # volumes. Consequently we need to explicitly unset
635 # VIR_MIGRATE_NON_SHARED_INC if there are no disks to be block
636 # migrated.
637 if (flags & libvirt.VIR_MIGRATE_NON_SHARED_INC != 0 and
638 not params.get('migrate_disks')):
639 flags &= ~libvirt.VIR_MIGRATE_NON_SHARED_INC
641 self._domain.migrateToURI3(
642 destination, params=params, flags=flags)
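    # Example (sketch): a peer-to-peer live migration that persists the
    # domain on the destination. The URI and `dest_xml` are hypothetical.
    #
    #     flags = (libvirt.VIR_MIGRATE_LIVE |
    #              libvirt.VIR_MIGRATE_PEER2PEER |
    #              libvirt.VIR_MIGRATE_PERSIST_DEST)
    #     guest.migrate('qemu+tcp://dest/system',
    #                   destination_xml=dest_xml, flags=flags)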
644 def abort_job(self):
645 """Requests to abort current background job"""
646 self._domain.abortJob()
648 def migrate_configure_max_downtime(self, mstime):
649 """Sets maximum time for which domain is allowed to be paused
651 :param mstime: Downtime in milliseconds.
652 """
653 self._domain.migrateSetMaxDowntime(mstime)
655 def migrate_start_postcopy(self):
656 """Switch running live migration to post-copy mode"""
657 self._domain.migrateStartPostCopy()
659 def announce_self(self):
660 libvirtmod_qemu.virDomainQemuMonitorCommand(
661 self._domain._o, 'announce_self', 1)
663 def get_job_info(self):
664 """Get job info for the domain
666 Query the libvirt job info for the domain (i.e. progress
667 of a migration or snapshot operation)
669 :returns: a JobInfo of guest
670 """
671 if JobInfo._have_job_stats: 671 ↛ 704 (line 671 didn't jump to line 704 because the condition on line 671 was always true)
672 try:
673 stats = self._domain.jobStats()
674 return JobInfo(**stats)
675 except libvirt.libvirtError as ex:
676 errmsg = ex.get_error_message()
677 if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
678 # Remote libvirt doesn't support new API
679 LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
680 JobInfo._have_job_stats = False
681 return JobInfo._get_job_stats_compat(self._domain)
682 elif ex.get_error_code() in (
683 libvirt.VIR_ERR_NO_DOMAIN,
684 libvirt.VIR_ERR_OPERATION_INVALID):
685 # Transient guest finished migration, so it has gone
686 # away completely
687 LOG.debug("Domain has shutdown/gone away: %s", ex)
688 return JobInfo(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
689 elif (ex.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR and
690 errmsg and "migration was active, "
691 "but no RAM info was set" in errmsg):
692 LOG.debug("Migration is active or completed but "
693 "virDomainGetJobStats is missing ram: %s", ex)
694 return JobInfo(type=libvirt.VIR_DOMAIN_JOB_NONE)
695 else:
696 LOG.debug("Failed to get job stats: %s", ex)
697 raise
698 except AttributeError as ex:
699 # Local python binding doesn't support new API
700 LOG.debug("Missing local virDomainGetJobStats: %s", ex)
701 JobInfo._have_job_stats = False
702 return JobInfo._get_job_stats_compat(self._domain)
703 else:
704 return JobInfo._get_job_stats_compat(self._domain)
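# Example (sketch, not part of the original module): polling get_job_info()
# until a live migration completes. Assumes the lazy `libvirt` import above
# has already run (i.e. a Guest was constructed); the 0.5s interval and the
# bare completion check are hypothetical simplifications.
def _example_wait_for_migration(guest):
    while True:
        info = guest.get_job_info()
        if info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
            return info
        LOG.debug("migration progress: %(done)d/%(total)d bytes",
                  {'done': info.memory_processed,
                   'total': info.memory_total})
        time.sleep(0.5)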
707class BlockDevice(object):
708 """Wrapper around block device API"""
710 REBASE_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
711 COMMIT_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
713 def __init__(self, guest, disk):
714 self._guest = guest
715 self._disk = disk
717 def abort_job(self, async_=False, pivot=False):
718 """Request to cancel a live block device job
720 :param async_: Cancel the block device job (e.g. 'copy' or
721 'commit'), and return as soon as possible, without
722 waiting for job completion
723 :param pivot: Pivot to the destination image when ending a
724 'copy' or "active commit" (meaning: merging the
725 contents of current active disk into its backing
726 file) job
727 """
728 flags = async_ and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC or 0
729 flags |= pivot and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT or 0
730 self._guest._domain.blockJobAbort(self._disk, flags=flags)
732 def get_job_info(self):
733 """Returns information about job currently running
735 :returns: BlockDeviceJobInfo, or None if no job exists
736 :raises: libvirt.libvirtError on error fetching block job info
737 """
739 # libvirt's blockJobInfo() raises libvirt.libvirtError if there was an
740 # error. It returns {} if the job no longer exists, or a fully
741 # populated dict if the job exists.
742 status = self._guest._domain.blockJobInfo(self._disk, flags=0)
744 # The job no longer exists
745 if not status:
746 return None
748 return BlockDeviceJobInfo(
749 job=status['type'],
750 bandwidth=status['bandwidth'],
751 cur=status['cur'],
752 end=status['end'])
754 def copy(self, dest_xml, shallow=False, reuse_ext=False, transient=False):
755 """Copy the guest-visible contents into a new disk
757 http://libvirt.org/html/libvirt-libvirt-domain.html#virDomainBlockCopy
759 :param dest_xml: XML describing the destination disk to copy to
760 :param shallow: Limit copy to top of source backing chain
761 :param reuse_ext: Reuse existing external file for a copy
762 :param transient: Don't force usage of recoverable job for the copy
763 operation
764 """
765 flags = shallow and libvirt.VIR_DOMAIN_BLOCK_COPY_SHALLOW or 0
766 flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_COPY_REUSE_EXT or 0
767 flags |= transient and libvirt.VIR_DOMAIN_BLOCK_COPY_TRANSIENT_JOB or 0
768 return self._guest._domain.blockCopy(self._disk, dest_xml, flags=flags)
770 def rebase(self, base, shallow=False, reuse_ext=False,
771 copy=False, relative=False, copy_dev=False):
772 """Copy data from backing chain into a new disk
774 This copies data from backing file(s) into overlay(s), giving
775 control over several aspects, like what part of a disk image
776 chain is to be copied and whether to reuse an existing destination
777 file, and it updates the backing file reference to the new disk.
779 :param shallow: Limit copy to top of the source backing chain
780 :param reuse_ext: Reuse an existing external file that was
781 pre-created
782 :param copy: Start a copy job
783 :param relative: Keep backing chain referenced using relative names
784 :param copy_dev: Treat the destination as type="block"
785 """
786 flags = shallow and libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW or 0
787 flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT or 0
788 flags |= copy and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY or 0
789 flags |= copy_dev and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_DEV or 0
790 flags |= relative and libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE or 0
791 return self._guest._domain.blockRebase(
792 self._disk, base, self.REBASE_DEFAULT_BANDWIDTH, flags=flags)
794 def commit(self, base, top, relative=False):
795 """Merge data from overlays into backing file
797 This live merges (or "commits") contents from overlays into
798 backing files, thus reducing the length of a disk image chain.
800 :param relative: Keep backing chain referenced using relative names
801 """
802 flags = relative and libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE or 0
803 return self._guest._domain.blockCommit(
804 self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags)
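    # Example (sketch): committing an overlay back into its backing file and
    # waiting for the merge to finish; the paths are hypothetical.
    #
    #     dev = guest.get_block_device('vda')
    #     dev.commit('/imgs/base.qcow2', '/imgs/top.qcow2', relative=True)
    #     while not dev.is_job_complete():
    #         time.sleep(0.5)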
806 def resize(self, size):
807 """Resize block device to the given size in bytes.
809 This resizes the block device within the instance to the given size.
811 :param size: The size to resize the device to in bytes.
812 """
813 flags = libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES
814 self._guest._domain.blockResize(self._disk, size, flags=flags)
816 def is_job_complete(self):
817 """Return True if the job is complete, False otherwise
819 :returns: True if the job is complete, False otherwise
820 :raises: libvirt.libvirtError on error fetching block job info
821 """
822 # NOTE(mdbooth): This method polls for block job completion. It returns
823 # true if either we get a status which indicates completion, or there
824 # is no longer a record of the job. Ideally this method and its
825 # callers would be rewritten to consume libvirt events from the job.
826 # This would provide a couple of advantages. Firstly, as it would no
827 # longer be polling it would notice completion immediately rather than
828 # at the next 0.5s check, and would also consume fewer resources.
829 # Secondly, with the current method we only know that 'no job'
830 # indicates completion. It does not necessarily indicate successful
831 # completion: the job could have failed, or been cancelled. When
832 # polling for block job info we have no way to detect this, so we
833 # assume success.
835 status = self.get_job_info()
837 # If the job no longer exists, it is because it has completed
838 # NOTE(mdbooth): See comment above: it may not have succeeded.
839 if status is None:
840 return True
842 # Track blockjob progress in DEBUG, helpful when reviewing failures.
843 job_type = LIBVIRT_BLOCK_JOB_TYPE.get(
844 status.job, f"Unknown to Nova ({status.job})")
845 LOG.debug("%(job_type)s block job progress, current cursor: %(cur)s "
846 "final cursor: %(end)s",
847 {'job_type': job_type, 'cur': status.cur, 'end': status.end})
849 # NOTE(lyarwood): Use the mirror element to determine if we can pivot
850 # to the new disk once blockjobinfo reports progress as complete.
851 if status.cur == status.end:
852 disk = self._guest.get_disk(self._disk)
853 if disk and disk.mirror:
854 return disk.mirror.ready == 'yes'
856 return False
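    # Example (sketch): finishing a block copy by pivoting once the mirror
    # is ready, which is what is_job_complete() reports. `dest_xml` is
    # hypothetical.
    #
    #     dev.copy(dest_xml, shallow=True, reuse_ext=True)
    #     while not dev.is_job_complete():
    #         time.sleep(0.5)
    #     dev.abort_job(pivot=True)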
858 def blockStats(self):
859 """Extracts block device statistics for a domain"""
860 return self._guest._domain.blockStats(self._disk)
863class VCPUInfo(object):
864 def __init__(self, id, cpu, state, time):
865 """Structure for information about guest vcpus.
867 :param id: The virtual cpu number
868 :param cpu: The host cpu currently associated
869 :param state: The running state of the vcpu (0 offline, 1 running, 2
870 blocked on resource)
871 :param time: The cpu time used in nanoseconds
872 """
873 self.id = id
874 self.cpu = cpu
875 self.state = state
876 self.time = time
879class BlockDeviceJobInfo(object):
880 def __init__(self, job, bandwidth, cur, end):
881 """Structure for information about running job.
883 :param job: The running job (0 placeholder, 1 pull,
884 2 copy, 3 commit, 4 active commit)
885 :param bandwidth: Used in MiB/s
886 :param cur: Indicates the position between 0 and 'end'
887 :param end: Indicates the position for this operation
888 """
889 self.job = job
890 self.bandwidth = bandwidth
891 self.cur = cur
892 self.end = end
895class JobInfo(object):
896 """Information about libvirt background jobs
898 This class encapsulates information about libvirt
899 background jobs. It provides a mapping from either
900 the old virDomainGetJobInfo API which returned a
901 fixed list of fields, or the modern virDomainGetJobStats
902 which returns an extendable dict of fields.
903 """
905 _have_job_stats = True
907 def __init__(self, **kwargs):
909 self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
910 self.time_elapsed = kwargs.get("time_elapsed", 0)
911 self.time_remaining = kwargs.get("time_remaining", 0)
912 self.downtime = kwargs.get("downtime", 0)
913 self.setup_time = kwargs.get("setup_time", 0)
914 self.data_total = kwargs.get("data_total", 0)
915 self.data_processed = kwargs.get("data_processed", 0)
916 self.data_remaining = kwargs.get("data_remaining", 0)
917 self.memory_total = kwargs.get("memory_total", 0)
918 self.memory_processed = kwargs.get("memory_processed", 0)
919 self.memory_remaining = kwargs.get("memory_remaining", 0)
920 self.memory_iteration = kwargs.get("memory_iteration", 0)
921 self.memory_constant = kwargs.get("memory_constant", 0)
922 self.memory_normal = kwargs.get("memory_normal", 0)
923 self.memory_normal_bytes = kwargs.get("memory_normal_bytes", 0)
924 self.memory_bps = kwargs.get("memory_bps", 0)
925 self.disk_total = kwargs.get("disk_total", 0)
926 self.disk_processed = kwargs.get("disk_processed", 0)
927 self.disk_remaining = kwargs.get("disk_remaining", 0)
928 self.disk_bps = kwargs.get("disk_bps", 0)
929 self.comp_cache = kwargs.get("compression_cache", 0)
930 self.comp_bytes = kwargs.get("compression_bytes", 0)
931 self.comp_pages = kwargs.get("compression_pages", 0)
932 self.comp_cache_misses = kwargs.get("compression_cache_misses", 0)
933 self.comp_overflow = kwargs.get("compression_overflow", 0)
935 @classmethod
936 def _get_job_stats_compat(cls, dom):
937 # Make the old virDomainGetJobInfo method look similar to the
938 # modern virDomainGetJobStats method
939 try:
940 info = dom.jobInfo()
941 except libvirt.libvirtError as ex:
942 # When migration of a transient guest completes, the guest
943 # goes away so we'll see NO_DOMAIN error code
944 #
945 # When migration of a persistent guest completes, the guest
946 # merely shuts off, but libvirt unhelpfully raises an
947 # OPERATION_INVALID error code
948 #
949 # Lets pretend both of these mean success
950 if ex.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN, 950 ↛ 955 (line 950 didn't jump to line 955 because the condition on line 950 was always true)
951 libvirt.VIR_ERR_OPERATION_INVALID):
952 LOG.debug("Domain has shutdown/gone away: %s", ex)
953 return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
954 else:
955 LOG.debug("Failed to get job info: %s", ex)
956 raise
958 return cls(
959 type=info[0],
960 time_elapsed=info[1],
961 time_remaining=info[2],
962 data_total=info[3],
963 data_processed=info[4],
964 data_remaining=info[5],
965 memory_total=info[6],
966 memory_processed=info[7],
967 memory_remaining=info[8],
968 disk_total=info[9],
969 disk_processed=info[10],
970 disk_remaining=info[11])