Coverage for nova/virt/libvirt/host.py: 88%
899 statements
coverage.py v7.6.12, created at 2025-04-17 15:08 +0000

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Manages information about the host OS and hypervisor.

This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""

from collections import defaultdict
import fnmatch
import glob
import inspect
from lxml import etree
import operator
import os
import queue
import threading
import typing as ty

from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import units
from oslo_utils import versionutils

from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.objects import fields
from nova.pci import utils as pci_utils
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
import nova.virt.node  # noqa

if ty.TYPE_CHECKING:
    import libvirt
else:
    libvirt = None

LOG = logging.getLogger(__name__)

native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("queue")

CONF = nova.conf.CONF


# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"

SEV_KERNEL_PARAM_FILE = '/sys/module/kvm_amd/parameters/sev'

# These are taken from the spec
# https://github.com/qemu/qemu/blob/v5.2.0/docs/interop/firmware.json
QEMU_FIRMWARE_DESCRIPTOR_PATHS = [
    '/usr/share/qemu/firmware',
    '/etc/qemu/firmware',
    # we intentionally ignore '$XDG_CONFIG_HOME/qemu/firmware'
]


def _get_loaders():
    if not any(
        os.path.exists(path) for path in QEMU_FIRMWARE_DESCRIPTOR_PATHS
    ):
        msg = _("Failed to locate firmware descriptor files")
        raise exception.InternalError(msg)

    _loaders = []

    for path in QEMU_FIRMWARE_DESCRIPTOR_PATHS:
        if not os.path.exists(path):
            continue

        for spec_path in sorted(glob.glob(f'{path}/*.json')):
            with open(spec_path, 'rb') as fh:
                spec = jsonutils.load(fh)

            _loaders.append(spec)

    return _loaders

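# A minimal sketch of what _get_loaders() returns, assuming a host with a
# hypothetical descriptor file installed (filenames vary by distribution):
#
#   /usr/share/qemu/firmware/50-edk2-x86_64-secure.json
#
# Directories are visited in QEMU_FIRMWARE_DESCRIPTOR_PATHS order and files
# are lexically sorted within each directory, so e.g.:
#
#   loaders = _get_loaders()
#   loaders[0]['interface-types']             # e.g. ['uefi'], per the spec
#   loaders[0]['targets'][0]['architecture']  # e.g. 'x86_64'
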

class Host(object):

    def __init__(self, uri, read_only=False,
                 conn_event_handler=None,
                 lifecycle_event_handler=None):

        global libvirt
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')

        self._uri = uri
        self._read_only = read_only
        self._initial_connection = True
        self._conn_event_handler = conn_event_handler
        self._conn_event_handler_queue: queue.Queue[ty.Callable] = (
            queue.Queue())
        self._lifecycle_event_handler = lifecycle_event_handler
        self._caps = None
        self._domain_caps = None
        self._hostname = None
        self._node_uuid = None

        self._wrapped_conn = None
        self._wrapped_conn_lock = threading.Lock()
        self._event_queue: ty.Optional[queue.Queue[ty.Callable]] = None

        self._events_delayed = {}
        # Note(toabctl): During a reboot of a domain, STOPPED and
        #                STARTED events are sent. To prevent shutting
        #                down the domain during a reboot, delay the
        #                STOPPED lifecycle event some seconds.
        self._lifecycle_delay = 15

        self._initialized = False
        self._libvirt_proxy_classes = self._get_libvirt_proxy_classes(libvirt)
        self._libvirt_proxy = self._wrap_libvirt_proxy(libvirt)

        self._loaders: ty.Optional[ty.List[dict]] = None

        # A number of features are conditional on support in the hardware,
        # kernel, QEMU, and/or libvirt. These are determined on demand and
        # memoized by various properties below
        self._supports_amd_sev: ty.Optional[bool] = None
        self._max_sev_guests: ty.Optional[int] = None
        self._max_sev_es_guests: ty.Optional[int] = None
        self._supports_uefi: ty.Optional[bool] = None
        self._supports_secure_boot: ty.Optional[bool] = None

        self._has_hyperthreading: ty.Optional[bool] = None

    @staticmethod
    def _get_libvirt_proxy_classes(libvirt_module):
        """Return a tuple for tpool.Proxy's autowrap argument containing all
        public vir* classes defined by the libvirt module.
        """

        # Get a list of (name, class) tuples of libvirt classes
        classes = inspect.getmembers(libvirt_module, inspect.isclass)

        # Return a list of just the vir* classes, filtering out libvirtError
        # and any private globals pointing at private internal classes.
        return tuple([cls[1] for cls in classes if cls[0].startswith("vir")])

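    # A minimal sketch of what the filter above selects, assuming a typical
    # libvirt-python module: inspect.getmembers() yields pairs such as
    # ('libvirtError', <class>), ('virConnect', <class>) and
    # ('virDomain', <class>); only names starting with "vir" are kept, so
    # the resulting tuple contains classes like virConnect, virDomain and
    # virSecret, while libvirtError is excluded.
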
    def _wrap_libvirt_proxy(self, obj):
        """Return an object wrapped in a tpool.Proxy using autowrap appropriate
        for the libvirt module.
        """

        # libvirt is not pure python, so eventlet monkey patching doesn't work
        # on it. Consequently long-running libvirt calls will not yield to
        # eventlet's event loop, starving all other greenthreads until
        # completion. eventlet's tpool.Proxy handles this situation for us by
        # executing proxied calls in a native thread.
        return tpool.Proxy(obj, autowrap=self._libvirt_proxy_classes)

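    # A minimal usage sketch (hypothetical values), assuming an eventlet
    # environment: every attribute access on the proxy returns a wrapped
    # callable that runs in a native thread pool, so a slow call such as
    #
    #   conn = self._libvirt_proxy.openAuth(uri, auth, flags)
    #   conn.getLibVersion()
    #
    # blocks only the calling greenthread; autowrap ensures returned vir*
    # instances (e.g. the virConnect above) are proxied as well.
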
    def _native_thread(self):
        """Receives async events coming in from libvirtd.

        This is a native thread which runs the default
        libvirt event loop implementation. This processes
        any incoming async events from libvirtd and queues
        them for later dispatch. This thread is only
        permitted to use libvirt python APIs, and the
        driver.queue_event method. In particular any use
        of logging is forbidden, since it will confuse
        eventlet's greenthread integration
        """

        while True:
            libvirt.virEventRunDefaultImpl()

    def _dispatch_thread(self):
        """Dispatches async events coming in from libvirtd.

        This is a green thread which waits for events to
        arrive from the libvirt event loop thread. This
        then dispatches the events to the compute manager.
        """

        while True:
            self._dispatch_events()

    def _conn_event_thread(self):
        """Dispatches async connection events"""
        # NOTE(mdbooth): This thread doesn't need to jump through the same
        # hoops as _dispatch_thread because it doesn't interact directly
        # with the libvirt native thread.
        while True:
            self._dispatch_conn_event()

    def _dispatch_conn_event(self):
        # NOTE(mdbooth): Splitting out this loop looks redundant, but it
        # means we can easily dispatch events synchronously from tests and
        # it isn't completely awful.
        handler = self._conn_event_handler_queue.get()
        try:
            handler()
        except Exception:
            LOG.exception('Exception handling connection event')
        finally:
            self._conn_event_handler_queue.task_done()

    @staticmethod
    def _event_device_removed_callback(conn, dom, dev, opaque):
        """Receives device removed events from libvirt.

        NB: this method is executing in a native thread, not
        an eventlet coroutine. It can only invoke other libvirt
        APIs, or use self._queue_event(). Any use of logging APIs
        in particular is forbidden.
        """
        self = opaque
        uuid = dom.UUIDString()
        self._queue_event(libvirtevent.DeviceRemovedEvent(uuid, dev))

    @staticmethod
    def _event_device_removal_failed_callback(conn, dom, dev, opaque):
        """Receives device removal failed events from libvirt.

        NB: this method is executing in a native thread, not
        an eventlet coroutine. It can only invoke other libvirt
        APIs, or use self._queue_event(). Any use of logging APIs
        in particular is forbidden.
        """
        self = opaque
        uuid = dom.UUIDString()
        self._queue_event(libvirtevent.DeviceRemovalFailedEvent(uuid, dev))

    @staticmethod
    def _event_lifecycle_callback(conn, dom, event, detail, opaque):
        """Receives lifecycle events from libvirt.

        NB: this method is executing in a native thread, not
        an eventlet coroutine. It can only invoke other libvirt
        APIs, or use self._queue_event(). Any use of logging APIs
        in particular is forbidden.
        """

        self = opaque

        uuid = dom.UUIDString()
        transition = None
        if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
            transition = virtevent.EVENT_LIFECYCLE_STOPPED
        elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
            transition = virtevent.EVENT_LIFECYCLE_STARTED
        elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
            if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY:
                transition = virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED
            elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED:
                # VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED is also sent when live
                # migration of the guest fails, so we cannot simply rely
                # on the event itself but need to check if the job itself was
                # successful.
                # NOTE(mriedem): The job check logic here is copied from
                # LibvirtDriver._live_migration_monitor.
                guest = libvirt_guest.Guest(dom)
                info = guest.get_job_info()
                if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
                    # Either still running, or failed or completed,
                    # let's untangle the mess.
                    info.type = libvirt_migrate.find_job_type(
                        guest, instance=None, logging_ok=False)

                if info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
                    transition = virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED
                else:
                    # Failed or some other status we don't know about, so just
                    # opt to report the guest is paused.
                    transition = virtevent.EVENT_LIFECYCLE_PAUSED
            else:
                transition = virtevent.EVENT_LIFECYCLE_PAUSED
        elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
            transition = virtevent.EVENT_LIFECYCLE_RESUMED

        if transition is not None:
            self._queue_event(virtevent.LifecycleEvent(uuid, transition))

    def _close_callback(self, conn, reason, opaque):
        close_info = {'conn': conn, 'reason': reason}
        self._queue_event(close_info)

    @staticmethod
    def _test_connection(conn):
        try:
            conn.getLibVersion()
            return True
        except libvirt.libvirtError as e:
            if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
                                       libvirt.VIR_ERR_INTERNAL_ERROR) and
                e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC)):
                LOG.debug('Connection to libvirt broke')
                return False
            raise

    @staticmethod
    def _connect_auth_cb(creds, opaque):
        if len(creds) == 0:
            return 0
        raise exception.InternalError(
            _("Can not handle authentication request for %d credentials")
            % len(creds))

    def _connect(self, uri, read_only):
        auth = [[libvirt.VIR_CRED_AUTHNAME,
                 libvirt.VIR_CRED_ECHOPROMPT,
                 libvirt.VIR_CRED_REALM,
                 libvirt.VIR_CRED_PASSPHRASE,
                 libvirt.VIR_CRED_NOECHOPROMPT,
                 libvirt.VIR_CRED_EXTERNAL],
                Host._connect_auth_cb,
                None]

        flags = 0
        if read_only:
            flags = libvirt.VIR_CONNECT_RO
        return self._libvirt_proxy.openAuth(uri, auth, flags)

    def _queue_event(self, event):
        """Puts an event on the queue for dispatch.

        This method is called by the native event thread to
        put events on the queue for later dispatch by the
        green thread. Any use of logging APIs is forbidden.
        """

        if self._event_queue is None:
            return

        # Queue the event...
        self._event_queue.put(event)

        # ...then wakeup the green thread to dispatch it
        c = ' '.encode()
        self._event_notify_send.write(c)
        self._event_notify_send.flush()

    def _dispatch_events(self):
        """Wait for & dispatch events from native thread

        Blocks until native thread indicates some events
        are ready. Then dispatches all queued events.
        """

        # Wait to be notified that there are some
        # events pending
        try:
            _c = self._event_notify_recv.read(1)
            assert _c
        except ValueError:
            return  # will be raised when pipe is closed

        # Process as many events as possible without
        # blocking
        last_close_event = None
        # required for mypy
        if self._event_queue is None:
            return
        while not self._event_queue.empty():
            try:
                event_type = ty.Union[
                    virtevent.InstanceEvent, ty.Mapping[str, ty.Any]]
                event: event_type = self._event_queue.get(block=False)
                if issubclass(type(event), virtevent.InstanceEvent):
                    # call possibly with delay
                    self._event_emit_delayed(event)

                elif 'conn' in event and 'reason' in event:
                    last_close_event = event
            except native_Queue.Empty:
                pass
        if last_close_event is None:
            return
        conn = last_close_event['conn']
        # get_new_connection may already have disabled the host,
        # in which case _wrapped_conn is None.
        with self._wrapped_conn_lock:
            if conn == self._wrapped_conn:
                reason = str(last_close_event['reason'])
                msg = _("Connection to libvirt lost: %s") % reason
                self._wrapped_conn = None
                self._queue_conn_event_handler(False, msg)

    def _event_emit_delayed(self, event):
        """Emit events - possibly delayed."""
        def event_cleanup(gt, *args, **kwargs):
            """Callback function for greenthread. Called
            to cleanup the _events_delayed dictionary when an event
            was called.
            """
            event = args[0]
            self._events_delayed.pop(event.uuid, None)

        # Cleanup possible delayed stop events.
        if event.uuid in self._events_delayed.keys():
            self._events_delayed[event.uuid].cancel()
            self._events_delayed.pop(event.uuid, None)
            LOG.debug("Removed pending event for %s due to event", event.uuid)

        if (isinstance(event, virtevent.LifecycleEvent) and
                event.transition == virtevent.EVENT_LIFECYCLE_STOPPED):
            # Delay STOPPED event, as they may be followed by a STARTED
            # event in case the instance is rebooting
            id_ = greenthread.spawn_after(self._lifecycle_delay,
                                          self._event_emit, event)
            self._events_delayed[event.uuid] = id_
            # add callback to cleanup self._events_delayed dict after
            # event was called
            id_.link(event_cleanup, event)
        else:
            self._event_emit(event)

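    # A worked timeline for the delay logic above, assuming a guest reboot
    # with the default 15 second delay: at t=0 a STOPPED event arrives and is
    # parked via greenthread.spawn_after(15, ...); at t=2 the STARTED event
    # arrives, finds the pending entry in _events_delayed, cancels it, and is
    # emitted immediately, so the compute manager never sees the transient
    # STOPPED state. If no STARTED event arrives, the STOPPED event fires at
    # t=15 and event_cleanup() removes it from _events_delayed.
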
    def _event_emit(self, event):
        if self._lifecycle_event_handler is not None:
            self._lifecycle_event_handler(event)

    def _init_events_pipe(self):
        """Create a self-pipe for the native thread to synchronize on.

        This code is taken from the eventlet tpool module, under terms
        of the Apache License v2.0.
        """

        self._event_queue = native_Queue.Queue()
        rpipe, wpipe = os.pipe()
        self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
        self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)

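    # This is the classic self-pipe pattern: _queue_event() (called from the
    # native libvirt thread) writes a single byte to _event_notify_send, and
    # the green dispatch thread sleeps in _event_notify_recv.read(1) until
    # that byte arrives, e.g.:
    #
    #   native thread:  self._event_queue.put(event)
    #                   self._event_notify_send.write(b' ')
    #   green thread:   self._event_notify_recv.read(1)  # wakes up
    #                   ... drain self._event_queue ...
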
    def _init_events(self):
        """Initializes the libvirt events subsystem.

        This requires running a native thread to provide the
        libvirt event loop integration. This forwards events
        to a green thread which does the actual dispatching.
        """

        self._init_events_pipe()

        LOG.debug("Starting native event thread")
        self._event_thread = native_threading.Thread(
            target=self._native_thread)
        self._event_thread.daemon = True
        self._event_thread.start()

        LOG.debug("Starting green dispatch thread")
        utils.spawn(self._dispatch_thread)

        LOG.debug("Starting connection event dispatch thread")
        utils.spawn(self._conn_event_thread)

    def _get_new_connection(self):
        # call with _wrapped_conn_lock held
        LOG.debug('Connecting to libvirt: %s', self._uri)

        # This will raise an exception on failure
        wrapped_conn = self._connect(self._uri, self._read_only)

        try:
            LOG.debug("Registering for lifecycle events %s", self)
            wrapped_conn.domainEventRegisterAny(
                None,
                libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                self._event_lifecycle_callback,
                self)
            wrapped_conn.domainEventRegisterAny(
                None,
                libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED,
                self._event_device_removed_callback,
                self)
            wrapped_conn.domainEventRegisterAny(
                None,
                libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVAL_FAILED,
                self._event_device_removal_failed_callback,
                self)
        except Exception as e:
            LOG.warning("URI %(uri)s does not support events: %(error)s",
                        {'uri': self._uri, 'error': e})

        try:
            LOG.debug("Registering for connection events: %s", str(self))
            wrapped_conn.registerCloseCallback(self._close_callback, None)
        except libvirt.libvirtError as e:
            LOG.warning("URI %(uri)s does not support connection"
                        " events: %(error)s",
                        {'uri': self._uri, 'error': e})

        return wrapped_conn

    def _queue_conn_event_handler(self, *args, **kwargs):
        if self._conn_event_handler is None:
            return

        def handler():
            return self._conn_event_handler(*args, **kwargs)

        self._conn_event_handler_queue.put(handler)

    def _get_connection(self):
        # multiple concurrent connections are protected by _wrapped_conn_lock
        with self._wrapped_conn_lock:
            # Drop the existing connection if it is not usable
            if (self._wrapped_conn is not None and
                    not self._test_connection(self._wrapped_conn)):
                self._wrapped_conn = None
                # Connection was previously up, and went down
                self._queue_conn_event_handler(
                    False, _('Connection to libvirt lost'))

            if self._wrapped_conn is None:
                try:
                    # This will raise if it fails to get a connection
                    self._wrapped_conn = self._get_new_connection()
                except Exception as ex:
                    with excutils.save_and_reraise_exception():
                        # If we previously had a connection and it went down,
                        # we generated a down event for that above.
                        # We also want to generate a down event for an initial
                        # failure, which won't be handled above.
                        if self._initial_connection:
                            self._queue_conn_event_handler(
                                False,
                                _('Failed to connect to libvirt: %(msg)s') %
                                {'msg': ex})
                finally:
                    self._initial_connection = False

                self._queue_conn_event_handler(True, None)

        return self._wrapped_conn

    def get_connection(self):
        """Returns a connection to the hypervisor

        This method should be used to create and return a well
        configured connection to the hypervisor.

        :returns: a libvirt.virConnect object
        """
        try:
            conn = self._get_connection()
        except libvirt.libvirtError as ex:
            LOG.exception("Connection to libvirt failed: %s", ex)
            payload = {'ip': CONF.my_ip, 'method': '_connect', 'reason': ex}
            ctxt = nova_context.get_admin_context()
            rpc.get_notifier('compute').error(ctxt,
                                              'compute.libvirt.error',
                                              payload)
            compute_utils.notify_about_libvirt_connect_error(
                ctxt, ip=CONF.my_ip, exception=ex)
            raise exception.HypervisorUnavailable()

        return conn

    @staticmethod
    def _libvirt_error_handler(context, err):
        # Just ignore instead of default outputting to stderr.
        pass

    def initialize(self):
        if self._initialized:
            return

        # NOTE(dkliban): Error handler needs to be registered before libvirt
        #                connection is used for the first time. Otherwise, the
        #                handler does not get registered.
        libvirt.registerErrorHandler(self._libvirt_error_handler, None)
        libvirt.virEventRegisterDefaultImpl()
        self._init_events()

        self._initialized = True

    def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
                       op=operator.lt):
        """Check libvirt version, hypervisor version, and hypervisor type

        :param hv_type: hypervisor driver from the top of this file.
        """
        conn = self.get_connection()
        try:
            if lv_ver is not None:
                libvirt_version = conn.getLibVersion()
                if op(libvirt_version,
                      versionutils.convert_version_to_int(lv_ver)):
                    return False

            if hv_ver is not None:
                hypervisor_version = conn.getVersion()
                if op(hypervisor_version,
                      versionutils.convert_version_to_int(hv_ver)):
                    return False

            if hv_type is not None:
                hypervisor_type = conn.getType()
                if hypervisor_type != hv_type:
                    return False

            return True
        except Exception:
            return False

    def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
        return self._version_check(
            lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)

    def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
        return self._version_check(
            lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)

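    # A worked example of the comparison, assuming libvirt 8.0.0 on the host:
    # conn.getLibVersion() returns 8000000 (major * 1,000,000 +
    # minor * 1,000 + release) and convert_version_to_int('7.8.0') returns
    # 7008000, so has_min_version(lv_ver='7.8.0') evaluates
    # operator.lt(8000000, 7008000) -> False and therefore returns True.
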
    def get_guest(self, instance):
        """Retrieve libvirt guest object for an instance.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.

        :param instance: a nova.objects.Instance object

        :returns: a nova.virt.libvirt.Guest object
        :raises exception.InstanceNotFound: The domain was not found
        :raises exception.InternalError: A libvirt error occurred
        """
        return libvirt_guest.Guest(self._get_domain(instance))

    def _get_domain(self, instance):
        """Retrieve libvirt domain object for an instance.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.

        :param instance: a nova.objects.Instance object

        :returns: a libvirt.Domain object
        :raises exception.InstanceNotFound: The domain was not found
        :raises exception.InternalError: A libvirt error occurred
        """
        try:
            conn = self.get_connection()
            return conn.lookupByUUIDString(instance.uuid)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=instance.uuid)

            msg = (_('Error from libvirt while looking up %(instance_name)s: '
                     '[Error Code %(error_code)s] %(ex)s') %
                   {'instance_name': instance.name,
                    'error_code': error_code,
                    'ex': ex})
            raise exception.InternalError(msg)

    def list_guests(self, only_running=True):
        """Get a list of Guest objects for nova instances

        :param only_running: True to only return running instances

        See method "list_instance_domains" for more information.

        :returns: list of Guest objects
        """
        domains = self.list_instance_domains(only_running=only_running)
        return [libvirt_guest.Guest(dom) for dom in domains]

    def list_instance_domains(self, only_running=True):
        """Get a list of libvirt.Domain objects for nova instances

        :param only_running: True to only return running instances

        Query libvirt to get a list of all libvirt.Domain objects
        that correspond to nova instances. If the only_running parameter
        is true this list will only include active domains, otherwise
        inactive domains will be included too.

        :returns: list of libvirt.Domain objects
        """
        flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
        if not only_running:
            flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE

        # listAllDomains() returns <list of virDomain>, not <virDomain>, so
        # tpool.Proxy's autowrap won't catch it. We need to wrap the
        # contents of the list we return.
        alldoms = (self._wrap_libvirt_proxy(dom)
                   for dom in self.get_connection().listAllDomains(flags))

        doms = []
        for dom in alldoms:
            doms.append(dom)

        return doms

    def get_available_cpus(self):
        """Get the set of CPUs that exist on the host.

        :returns: set of CPUs, raises libvirtError on error
        """
        cpus, cpu_map, online = self.get_connection().getCPUMap()
        return {cpu for cpu in range(cpus)}

    def get_online_cpus(self):
        """Get the set of CPUs that are online on the host

        :returns: set of online CPUs, raises libvirtError on error
        """
        cpus, cpu_map, online = self.get_connection().getCPUMap()

        online_cpus = set()
        for cpu in range(cpus):
            if cpu_map[cpu]:
                online_cpus.add(cpu)

        return online_cpus

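    # A worked example, assuming a 4-CPU host with CPU 2 offline:
    # getCPUMap() returns (4, [True, True, False, True], 3), so
    # get_available_cpus() yields {0, 1, 2, 3} while get_online_cpus()
    # yields {0, 1, 3}.
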
    def get_cpu_model_names(self):
        """Get the cpu models based on host CPU arch

        :returns: a list of cpu models which are supported by the given
            CPU arch
        """
        arch = self.get_capabilities().host.cpu.arch
        return self.get_connection().getCPUModelNames(arch)

    @staticmethod
    def _log_host_capabilities(xmlstr):
        # NOTE(mriedem): This looks a bit weird but we do this so we can stub
        # out this method in unit/functional test runs since the xml string is
        # big and it can cause subunit parsing to fail (see bug 1813147).
        LOG.info("Libvirt host capabilities %s", xmlstr)

    def get_capabilities(self):
        """Returns the host capabilities information

        Returns an instance of config.LibvirtConfigCaps representing
        the capabilities of the host.

        Note: The result is cached in the member attribute _caps.

        :returns: a config.LibvirtConfigCaps object
        """
        if self._caps:
            return self._caps

        xmlstr = self.get_connection().getCapabilities()
        self._log_host_capabilities(xmlstr)
        self._caps = vconfig.LibvirtConfigCaps()
        self._caps.parse_str(xmlstr)

        # NOTE(mriedem): Don't attempt to get baseline CPU features
        # if libvirt can't determine the host cpu model.
        if (
            hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES') and
            self._caps.host.cpu.model is not None
        ):
            try:
                xml_str = self._caps.host.cpu.to_xml()
                if isinstance(xml_str, bytes):
                    xml_str = xml_str.decode('utf-8')
                # NOTE(kevinz): The baseline CPU info on Aarch64 will not
                # include any features. So on Aarch64, we use the original
                # features from LibvirtConfigCaps.
                if self._caps.host.cpu.arch != fields.Architecture.AARCH64:
                    features = self.get_connection().baselineCPU(
                        [xml_str],
                        libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
                    if features:
                        cpu = vconfig.LibvirtConfigCPU()
                        cpu.parse_str(features)
                        self._caps.host.cpu.features = cpu.features
            except libvirt.libvirtError as ex:
                error_code = ex.get_error_code()
                if error_code == libvirt.VIR_ERR_NO_SUPPORT:
                    LOG.warning(
                        "URI %(uri)s does not support full set of host "
                        "capabilities: %(error)s",
                        {'uri': self._uri, 'error': ex})
                else:
                    raise

        return self._caps

    def get_domain_capabilities(self):
        """Returns the capabilities you can request when creating a
        domain (VM) with that hypervisor, for various combinations of
        architecture and machine type.

        In this context the fuzzy word "hypervisor" implies QEMU
        binary, libvirt itself and the host config. libvirt provides
        this in order that callers can determine what the underlying
        emulator and/or libvirt is capable of, prior to creating a domain
        (for instance via virDomainCreateXML or virDomainDefineXML).
        However nova needs to know the capabilities much earlier, when
        the host's compute service is first initialised, in order that
        placement decisions can be made across many compute hosts.
        Therefore this is expected to be called during the init_host()
        phase of the driver lifecycle rather than just before booting
        an instance.

        This causes an additional complication since the Python
        binding for this libvirt API call requires the architecture
        and machine type to be provided. So in order to gain a full
        picture of the hypervisor's capabilities, technically we need
        to call it with the right parameters, once for each
        (architecture, machine_type) combination which we care about.
        However the libvirt experts have advised us that in practice
        the domain capabilities do not (yet, at least) vary enough
        across machine types to justify the cost of calling
        getDomainCapabilities() once for every single (architecture,
        machine_type) combination. In particular, SEV support isn't
        reported per-machine type, and since there are usually many
        machine types, we heed the advice of the experts that it's
        typically sufficient to call it once per host architecture:

            https://bugzilla.redhat.com/show_bug.cgi?id=1683471#c7

        However, that's not quite sufficient in the context of nova,
        because SEV guests typically require a q35 machine type, as do
        KVM/QEMU guests that want Secure Boot, whereas the current
        default machine type for x86_64 is 'pc'. So we need results
        from the getDomainCapabilities API for at least those two.
        Fortunately we can take advantage of the results from the
        getCapabilities API which marks selected machine types as
        canonical, e.g.:

            <machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
            <machine canonical='pc-q35-2.11' maxCpus='288'>q35</machine>

        So for now, we call getDomainCapabilities for these canonical
        machine types of each architecture, plus for the
        architecture's default machine type, if that is not one of the
        canonical types.

        Future domain capabilities might report SEV in a more
        fine-grained manner, and we also expect to use this method to
        detect other features, such as for gracefully handling machine
        types and potentially for detecting OVMF binaries. Therefore
        we memoize the results of the API calls in a nested dict where
        the top-level keys are architectures, and second-level keys
        are machine types, in order to allow easy expansion later.

        Whenever libvirt/QEMU are updated, cached domCapabilities
        would get outdated (because QEMU will contain new features and
        the capabilities will vary). However, this should not be a
        problem here, because when libvirt/QEMU gets updated, the
        nova-compute agent also needs restarting, at which point the
        memoization will vanish because it's not persisted to disk.

        Note: The result is cached in the member attribute
        _domain_caps.

        :returns: a nested dict of dicts which maps architectures to
            machine types to instances of config.LibvirtConfigDomainCaps
            representing the domain capabilities of the host for that arch
            and machine type: ``{arch: {machine_type: LibvirtConfigDomainCaps}}``
        """
        if self._domain_caps:
            return self._domain_caps

        domain_caps: ty.Dict = defaultdict(dict)
        caps = self.get_capabilities()
        virt_type = CONF.libvirt.virt_type

        for guest in caps.guests:
            arch = guest.arch
            domain = guest.domains.get(virt_type, guest.default_domain)

            for machine_type in self._get_machine_types(arch, domain):
                # It is expected that if there are multiple <guest>
                # elements, each will have a different architecture;
                # for example, on x86 hosts one <guest> will contain
                # <arch name='i686'> and one will contain <arch
                # name='x86_64'>. But it doesn't hurt to add a safety
                # net to avoid needlessly calling libvirt's API more
                # times than we need.
                if machine_type and machine_type in domain_caps[arch]:
                    continue
                self._add_to_domain_capabilities(domain.emulator, arch,
                                                 domain_caps, machine_type,
                                                 virt_type)

        # NOTE(aspiers): Use a temporary variable to update the
        # instance variable atomically, otherwise if some API
        # calls succeeded and then one failed, we might
        # accidentally memoize a partial result.
        self._domain_caps = domain_caps

        return self._domain_caps

    def _get_machine_types(self, arch, domain):
        """Get the machine types for this architecture for which we need to
        call getDomainCapabilities, i.e. the canonical machine types,
        and the default machine type (if it's not one of the canonical
        machine types).

        See the docstring for get_domain_capabilities() for an explanation
        of why we choose this set of machine types.
        """
        # NOTE(aspiers): machine_type could be None here if nova
        # doesn't have a default machine type for this architecture.
        # See _add_to_domain_capabilities() below for how this is handled.
        mtypes = set([libvirt_utils.get_default_machine_type(arch)])
        mtypes.update(domain.aliases.keys())
        LOG.debug("Getting domain capabilities for %(arch)s via "
                  "machine types: %(mtypes)s",
                  {'arch': arch, 'mtypes': mtypes})
        return mtypes

    def _add_to_domain_capabilities(self, emulator_bin, arch, domain_caps,
                                    machine_type, virt_type):
        # NOTE(aspiers): machine_type could be None here if nova
        # doesn't have a default machine type for this architecture.
        # In that case we pass a machine_type of None to the libvirt
        # API and rely on it choosing a sensible default which will be
        # returned in the <machine> element. It could also be an
        # alias like 'pc' rather than a full machine type.
        #
        # NOTE(kchamart): Prior to libvirt v4.7.0 libvirt picked its
        # default machine type for x86, 'pc', as reported by QEMU's
        # default. From libvirt v4.7.0 onwards, libvirt _explicitly_
        # declared the "preferred" default for x86 as 'pc' (and
        # appropriate values for other architectures), and only uses
        # QEMU's reported default (whatever that may be) if 'pc' does
        # not exist. This was done "to isolate applications from
        # hypervisor changes that may cause incompatibilities" --
        # i.e. if, or when, QEMU changes its default machine type to
        # something else. Refer to this libvirt commit:
        #
        #   https://libvirt.org/git/?p=libvirt.git;a=commit;h=26cfb1a3
        try:
            cap_obj = self._get_domain_capabilities(
                emulator_bin=emulator_bin, arch=arch,
                machine_type=machine_type, virt_type=virt_type)
        except libvirt.libvirtError as ex:
            # NOTE(sean-k-mooney): This can happen for several
            # reasons, but one common example is if you have
            # multiple QEMU emulators installed and you set
            # virt-type=kvm. In this case any non-native emulator,
            # e.g. AArch64 on an x86 host, will (correctly) raise
            # an exception as KVM cannot be used to accelerate CPU
            # instructions for non-native architectures.
            error_code = ex.get_error_code()
            LOG.debug(
                "Error from libvirt when retrieving domain capabilities "
                "for arch %(arch)s / virt_type %(virt_type)s / "
                "machine_type %(mach_type)s: "
                "[Error Code %(error_code)s]: %(exception)s",
                {'arch': arch, 'virt_type': virt_type,
                 'mach_type': machine_type, 'error_code': error_code,
                 'exception': ex})
            # Remove archs added by default dict lookup when checking
            # if the machine type has already been recorded.
            if arch in domain_caps:
                domain_caps.pop(arch)
            return

        # Register the domain caps using the expanded form of
        # machine type returned by libvirt in the <machine>
        # element (e.g. pc-i440fx-2.11)
        if cap_obj.machine_type:
            domain_caps[arch][cap_obj.machine_type] = cap_obj
        else:
            # NOTE(aspiers): In theory this should never happen,
            # but better safe than sorry.
            LOG.warning(
                "libvirt getDomainCapabilities("
                "emulator_bin=%(emulator_bin)s, arch=%(arch)s, "
                "machine_type=%(machine_type)s, virt_type=%(virt_type)s) "
                "returned null <machine> type",
                {'emulator_bin': emulator_bin, 'arch': arch,
                 'machine_type': machine_type, 'virt_type': virt_type}
            )

        # And if we passed an alias, register the domain caps
        # under that too.
        if machine_type and machine_type != cap_obj.machine_type:
            domain_caps[arch][machine_type] = cap_obj
            cap_obj.machine_type_alias = machine_type

    def _get_domain_capabilities(self, emulator_bin=None, arch=None,
                                 machine_type=None, virt_type=None, flags=0):
        xmlstr = self.get_connection().getDomainCapabilities(
            emulator_bin,
            arch,
            machine_type,
            virt_type,
            flags
        )
        LOG.debug("Libvirt host hypervisor capabilities for arch=%s and "
                  "machine_type=%s:\n%s", arch, machine_type, xmlstr)
        caps = vconfig.LibvirtConfigDomainCaps()
        caps.parse_str(xmlstr)
        return caps

    def get_driver_type(self):
        """Get hypervisor type.

        :returns: hypervisor type (ex. qemu)

        """

        return self.get_connection().getType()

    def get_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)

        """

        return self.get_connection().getVersion()

    def get_hostname(self):
        """Returns the hostname of the hypervisor."""
        hostname = self.get_connection().getHostname()
        if self._hostname is None:
            self._hostname = hostname
        elif hostname != self._hostname:
            LOG.error('Hostname has changed from %(old)s '
                      'to %(new)s. A restart is required to take effect.',
                      {'old': self._hostname, 'new': hostname})
        return self._hostname

    def get_node_uuid(self):
        """Returns the UUID of this node."""
        if not self._node_uuid:
            self._node_uuid = nova.virt.node.get_local_node_uuid()
        return self._node_uuid

    def find_secret(self, usage_type, usage_id):
        """Find a secret.

        usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
        usage_id: name of resource in secret
        """
        if usage_type == 'iscsi':
            usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
        elif usage_type in ('rbd', 'ceph'):
            usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
        elif usage_type == 'volume':
            usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
        else:
            msg = _("Invalid usage_type: %s")
            raise exception.InternalError(msg % usage_type)

        try:
            conn = self.get_connection()
            return conn.secretLookupByUsage(usage_type_const, usage_id)
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
                return None

    def create_secret(self, usage_type, usage_id, password=None, uuid=None):
        """Create a secret.

        :param usage_type: one of 'iscsi', 'ceph', 'rbd', 'volume', 'vtpm'.
                           'rbd' will be converted to 'ceph'. 'vtpm' secrets
                           are private and ephemeral; others are not.
        :param usage_id: name of resource in secret
        :param password: optional secret value to set
        :param uuid: optional UUID of the secret; else one is generated by
            libvirt
        """
        secret_conf = vconfig.LibvirtConfigSecret()
        secret_conf.ephemeral = usage_type == 'vtpm'
        secret_conf.private = usage_type == 'vtpm'
        secret_conf.usage_id = usage_id
        secret_conf.uuid = uuid
        if usage_type in ('rbd', 'ceph'):
            secret_conf.usage_type = 'ceph'
        elif usage_type == 'iscsi':
            secret_conf.usage_type = 'iscsi'
        elif usage_type == 'volume':
            secret_conf.usage_type = 'volume'
        elif usage_type == 'vtpm':
            secret_conf.usage_type = 'vtpm'
        else:
            msg = _("Invalid usage_type: %s")
            raise exception.InternalError(msg % usage_type)

        xml = secret_conf.to_xml()
        try:
            LOG.debug('Secret XML: %s', xml)
            conn = self.get_connection()
            secret = conn.secretDefineXML(xml)
            if password is not None:
                secret.setValue(password)
            return secret
        except libvirt.libvirtError:
            with excutils.save_and_reraise_exception():
                LOG.error('Error defining a secret with XML: %s', xml)

    def delete_secret(self, usage_type, usage_id):
        """Delete a secret.

        :param usage_type: one of 'iscsi', 'ceph', 'rbd', 'volume' or 'vtpm'
        :param usage_id: name of resource in secret
        """
        secret = self.find_secret(usage_type, usage_id)
        if secret is not None:
            secret.undefine()

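    # A minimal lifecycle sketch for the three secret helpers above, assuming
    # a hypothetical Ceph auth secret named 'client.nova':
    #
    #   secret = host.create_secret('rbd', 'client.nova', password=key)
    #   host.find_secret('rbd', 'client.nova')    # -> the virSecret, or None
    #   host.delete_secret('rbd', 'client.nova')  # undefines it again
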
    def _get_hardware_info(self):
        """Returns hardware information about the Node.

        Note that the memory size is reported in MiB instead of KiB.
        """
        return self.get_connection().getInfo()

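    # For reference, getInfo() returns a list of eight values:
    # [arch/model string, memory size (MiB), active CPUs,
    #  CPU frequency (MHz), NUMA nodes, sockets per node, cores per socket,
    #  threads per core], which is why index 1 is used for memory below and
    # index 3 for the frequency in get_cpu_stats().
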
    def get_memory_mb_total(self):
        """Get the total memory size(MB) of physical computer.

        :returns: the total amount of memory(MB).
        """
        if CONF.libvirt.file_backed_memory > 0:
            return CONF.libvirt.file_backed_memory
        else:
            return self._get_hardware_info()[1]

    def _sum_domain_memory_mb(self):
        """Get the total memory consumed by guest domains."""
        used = 0
        for guest in self.list_guests():
            try:
                # TODO(sahid): Use get_info...
                dom_mem = int(guest._get_domain_info()[2])
            except libvirt.libvirtError as e:
                LOG.warning("couldn't obtain the memory from domain:"
                            " %(uuid)s, exception: %(ex)s",
                            {"uuid": guest.uuid, "ex": e})
                continue
            used += dom_mem
        # Convert it to MB
        return used // units.Ki

    @staticmethod
    def _get_avail_memory_kb():
        with open('/proc/meminfo') as fp:
            m = fp.read().split()
        idx1 = m.index('MemFree:')
        idx2 = m.index('Buffers:')
        idx3 = m.index('Cached:')

        avail = int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])

        return avail

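    # A worked example of the token arithmetic above: /proc/meminfo lines
    # such as "MemFree: 1024000 kB" split into ['MemFree:', '1024000', 'kB',
    # ...], so m.index('MemFree:') + 1 addresses the value. With (say)
    # MemFree=1024000, Buffers=256000 and Cached=512000 the method returns
    # 1792000 kB.
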
    def get_memory_mb_used(self):
        """Get the used memory size(MB) of physical computer.

        :returns: the total usage of memory(MB).
        """
        if CONF.libvirt.file_backed_memory > 0:
            # For file_backed_memory, report the total usage of guests,
            # ignoring host memory
            return self._sum_domain_memory_mb()
        else:
            return (self.get_memory_mb_total() -
                    (self._get_avail_memory_kb() // units.Ki))

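    # E.g. with 16384 MiB total and 4194304 KiB (4 GiB) available, this
    # returns 16384 - (4194304 // 1024) = 12288 MiB used.
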
    def get_cpu_stats(self):
        """Returns the current CPU state of the host with frequency."""
        stats = self.get_connection().getCPUStats(
            libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
        # getInfo() returns various information about the host node
        # No. 3 is the expected CPU frequency.
        stats["frequency"] = self._get_hardware_info()[3]
        return stats

    def _check_machine_type(self, caps, mach_type):
        """Validate if hw machine type is in capabilities of the host

        :param caps: host capabilities
        :param mach_type: machine type
        """
        possible_machine_types = []

        caps_tree = etree.fromstring(str(caps))
        for guest in caps_tree.findall('guest'):
            for machine in guest.xpath('arch/machine'):
                possible_machine_types.append(machine.text)

        if mach_type not in possible_machine_types:
            raise exception.InvalidMachineType(
                message="'%s' is not a valid/supported machine type. "
                        "Supported machine types are: %s" % (
                            mach_type, possible_machine_types))

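    # A minimal sketch of the XML walked above, assuming a host capabilities
    # document like:
    #
    #   <capabilities>
    #     <guest>
    #       <arch name='x86_64'>
    #         <machine maxCpus='255'>pc-i440fx-2.11</machine>
    #         <machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
    #       </arch>
    #     </guest>
    #   </capabilities>
    #
    # findall('guest') and xpath('arch/machine') collect the machine texts,
    # so both 'pc' and 'pc-i440fx-2.11' would be accepted as mach_type.
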
    def write_instance_config(self, xml):
        """Defines a domain, but does not start it.

        :param xml: XML domain definition of the guest.

        :returns: an instance of Guest
        """
        domain = self.get_connection().defineXML(xml)
        return libvirt_guest.Guest(domain)

    def device_lookup_by_name(self, name):
        """Lookup a node device by its name.

        :returns: a virNodeDevice instance
        """
        return self.get_connection().nodeDeviceLookupByName(name)

    def device_create(self, conf):
        """Create a node device from specified device XML

        This creates the device as transient.

        :param conf: A LibvirtConfigObject of the device to create

        :returns: a virNodeDevice instance if successful, else None
        """
        device_xml = conf.to_xml()
        return self.get_connection().nodeDeviceCreateXML(device_xml, flags=0)

    def device_define(self, conf):
        """Define a node device from specified device XML

        This defines the device to make it persistent.

        :param conf: A LibvirtConfigObject of the device to create

        :returns: a virNodeDevice instance if successful, else None
        """
        device_xml = conf.to_xml()
        return self.get_connection().nodeDeviceDefineXML(device_xml, flags=0)

    def device_start(self, dev):
        """Start a defined node device

        :param dev: The virNodeDevice instance to start
        """
        # extra flags; not used yet, so callers should always pass 0
        # https://libvirt.org/html/libvirt-libvirt-nodedev.html
        flags = 0
        result = dev.create(flags)
        if result == -1:
            msg = f'Failed to start node device {dev.name()}'
            raise exception.InternalError(_(msg))

    def device_set_autostart(self, dev, autostart=True):
        """Set a node device to automatically start when the host boots

        This can set whether the node device should automatically start when
        the host machine boots or when the parent device becomes available.

        :param dev: The virNodeDevice instance to set the autostart value
        :param autostart: Whether to set the device to automatically start
        """
        result = dev.setAutostart(autostart=autostart)
        if result == -1:
            msg = (
                f'Failed to set autostart to {autostart} for node device '
                f'{dev.name()}')
            raise exception.InternalError(_(msg))

    def _get_pcinet_info(
        self,
        dev: 'libvirt.virNodeDevice',
        net_devs: ty.List['libvirt.virNodeDevice']
    ) -> ty.Optional[ty.List[str]]:
        """Returns the network features of a NET device, if any."""
        net_dev = {dev.parent(): dev for dev in net_devs}.get(dev.name(), None)
        if net_dev is None:
            return None
        xmlstr = net_dev.XMLDesc(0)
        cfgdev = vconfig.LibvirtConfigNodeDevice()
        cfgdev.parse_str(xmlstr)
        return cfgdev.pci_capability.features

    def _get_vf_parent_pci_vpd_info(
        self,
        vf_device: 'libvirt.virNodeDevice',
        parent_pf_name: str,
        candidate_devs: ty.List['libvirt.virNodeDevice']
    ) -> ty.Optional[vconfig.LibvirtConfigNodeDeviceVpdCap]:
        """Returns PCI VPD info of a parent device of a PCI VF.

        :param vf_device: a VF device object to use for lookup.
        :param str parent_pf_name: parent PF name formatted as pci_dddd_bb_ss_f
        :param candidate_devs: devices that could be parent devs for the VF.
        :returns: A VPD capability object of a parent device.
        """
        parent_dev = next(
            (dev for dev in candidate_devs if dev.name() == parent_pf_name),
            None
        )
        if parent_dev is None:
            return None

        xmlstr = parent_dev.XMLDesc(0)
        cfgdev = vconfig.LibvirtConfigNodeDevice()
        cfgdev.parse_str(xmlstr)
        return cfgdev.pci_capability.vpd_capability

    @staticmethod
    def _get_vpd_card_serial_number(
        dev: 'libvirt.virNodeDevice',
    ) -> ty.Optional[ty.List[str]]:
        """Returns a card serial number stored in PCI VPD (if present)."""
        xmlstr = dev.XMLDesc(0)
        cfgdev = vconfig.LibvirtConfigNodeDevice()
        cfgdev.parse_str(xmlstr)
        vpd_cap = cfgdev.pci_capability.vpd_capability
        if not vpd_cap:
            return None
        return vpd_cap.card_serial_number

    def _get_pf_details(self, device: dict, pci_address: str) -> dict:
        if device.get('dev_type') != fields.PciDeviceType.SRIOV_PF:
            return {}

        try:
            return {
                'mac_address': pci_utils.get_mac_by_pci_address(pci_address)
            }
        except exception.PciDeviceNotFoundById:
            LOG.debug(
                'Cannot get MAC address of the PF %s. It is probably attached '
                'to a guest already', pci_address)
            return {}

1363 def _get_pcidev_info(
1364 self,
1365 devname: str,
1366 dev: 'libvirt.virNodeDevice',
1367 net_devs: ty.List['libvirt.virNodeDevice'],
1368 vdpa_devs: ty.List['libvirt.virNodeDevice'],
1369 pci_devs: ty.List['libvirt.virNodeDevice'],
1370 ) -> ty.Dict[str, ty.Union[str, dict]]:
1371 """Returns a dict of PCI device."""
1373 def _get_device_type(
1374 cfgdev: vconfig.LibvirtConfigNodeDevice,
1375 pci_address: str,
1376 device: 'libvirt.virNodeDevice',
1377 net_devs: ty.List['libvirt.virNodeDevice'],
1378 vdpa_devs: ty.List['libvirt.virNodeDevice'],
1379 ) -> ty.Dict[str, str]:
1380 """Get a PCI device's device type.
1382 An assignable PCI device can be a normal PCI device,
1383 a SR-IOV Physical Function (PF), or a SR-IOV Virtual
1384 Function (VF).
1385 """
1386 net_dev_parents = {dev.parent() for dev in net_devs}
1387 vdpa_parents = {dev.parent() for dev in vdpa_devs}
1388 for fun_cap in cfgdev.pci_capability.fun_capability:
1389 if fun_cap.type == 'virt_functions':
1390 return {
1391 'dev_type': fields.PciDeviceType.SRIOV_PF,
1392 }
1393 if ( 1393 ↛ 1388line 1393 didn't jump to line 1388 because the condition on line 1393 was always true
1394 fun_cap.type == 'phys_function' and
1395 len(fun_cap.device_addrs) != 0
1396 ):
1397 phys_address = "%04x:%02x:%02x.%01x" % (
1398 fun_cap.device_addrs[0][0],
1399 fun_cap.device_addrs[0][1],
1400 fun_cap.device_addrs[0][2],
1401 fun_cap.device_addrs[0][3])
1402 result = {
1403 'dev_type': fields.PciDeviceType.SRIOV_VF,
1404 'parent_addr': phys_address,
1405 }
1406 parent_ifname = None
1407 # NOTE(sean-k-mooney): if the VF is a parent of a netdev
1408 # the PF should also have a netdev, however on some exotic
1409 # hardware such as Cavium ThunderX this may not be the case
1410 # see bug #1915255 for details. As such we wrap this in a
1411 # try except block.
1412 if device.name() in net_dev_parents:
1413 try:
1414 parent_ifname = (
1415 pci_utils.get_ifname_by_pci_address(
1416 pci_address, pf_interface=True))
1417 result['parent_ifname'] = parent_ifname
1418 except exception.PciDeviceNotFoundById:
1419 # NOTE(sean-k-mooney): we ignore this error as it
1420 # is expected when the virtual function is not a
1421 # NIC or the VF does not have a parent PF with a
1422 # netdev. We do not log here as this is called
1423 # in a periodic task and that would be noisy at
1424 # debug level.
1425 pass
1426 if device.name() in vdpa_parents:
1427 result['dev_type'] = fields.PciDeviceType.VDPA
1428 return result
1430 return {'dev_type': fields.PciDeviceType.STANDARD}
1432 def _get_vpd_details(
1433 device_dict: dict,
1434 device: 'libvirt.virNodeDevice',
1435 pci_devs: ty.List['libvirt.virNodeDevice']
1436 ) -> ty.Dict[str, ty.Any]:
1437 """Get information from PCI VPD (if present).
1439 PCI/PCIe devices may include the optional VPD capability. It may
1440 contain useful information such as the unique serial number
1441 uniquely assigned at a factory.
1443 If a device is a VF and it does not contain the VPD capability,
1444 a parent device's VPD is used (if present) as a fallback to
1445 retrieve the unique add-in card number. Whether a VF exposes
1446 the VPD capability or not may be controlled via a vendor-specific
1447 firmware setting.
1448 """
1449 vpd_info: ty.Dict[str, ty.Any] = {}
1450 # At the time of writing only the serial number had a clear
1451 # use-case. However, the set of fields may be extended.
1452 card_serial_number = self._get_vpd_card_serial_number(device)
1454 if (not card_serial_number and
1455 device_dict.get('dev_type') == fields.PciDeviceType.SRIOV_VF
1456 ):
1457 # Format the address of a physical function to use underscores
1458 # since that's how Libvirt formats the <name> element content.
1459 pf_addr = device_dict.get('parent_addr')
1460 if not pf_addr: 1460 ↛ 1461line 1460 didn't jump to line 1461 because the condition on line 1460 was never true
1461 LOG.warning("A VF device dict does not have a parent PF "
1462 "address in it which is unexpected. Skipping "
1463 "serial number retrieval")
1464 return vpd_info
1466 formatted_addr = pf_addr.replace('.', '_').replace(':', '_')
1467 vpd_cap = self._get_vf_parent_pci_vpd_info(
1468 device, f'pci_{formatted_addr}', pci_devs)
1469 if vpd_cap is not None:
1470 card_serial_number = vpd_cap.card_serial_number
1472 if card_serial_number:
1473 vpd_info = {'card_serial_number': card_serial_number}
1474 return vpd_info
1476 def _get_sriov_netdev_details(
1477 device_dict: dict,
1478 device: 'libvirt.virNodeDevice',
1479 ) -> ty.Dict[str, ty.Dict[str, ty.Any]]:
1480 """Get SR-IOV related information"""
1481 sriov_info: ty.Dict[str, ty.Any] = {}
1483 if device_dict.get('dev_type') != fields.PciDeviceType.SRIOV_VF: 1483 ↛ 1484line 1483 didn't jump to line 1484 because the condition on line 1483 was never true
1484 return sriov_info
1486 pf_addr = device_dict['parent_addr']
1488 # A netdev VF may be associated with a PF which does not have a
1489 # netdev as described in LP #1915255.
1490 try:
1491 sriov_info.update({
1492 'pf_mac_address': pci_utils.get_mac_by_pci_address(pf_addr)
1493 })
1494 except exception.PciDeviceNotFoundById:
1495 LOG.debug(f'Could not get a PF mac for {pf_addr}')
1496 # For the purposes Nova uses this information currently,
1497 # having both a PF MAC and a VF number is needed so we return
1498 # an empty dict if a PF MAC is not available.
1499 return {}
1501 vf_num = pci_utils.get_vf_num_by_pci_address(
1502 device_dict['address'])
1504 sriov_info.update({'vf_num': vf_num})
1505 return sriov_info
1507 def _get_device_capabilities(
1508 device_dict: dict,
1509 device: 'libvirt.virNodeDevice',
1510 pci_devs: ty.List['libvirt.virNodeDevice'],
1511 net_devs: ty.List['libvirt.virNodeDevice']
1512 ) -> ty.Dict[str, ty.Any]:
1513 """Get PCI VF device's additional capabilities.
1515 If a PCI device is a virtual function, this function reads the PCI
1516 parent's network capabilities (must be always a NIC device) and
1517 appends this information to the device's dictionary.
1518 """
1519 caps: ty.Dict[str, ty.Any] = {}
1521 if device_dict.get('dev_type') == fields.PciDeviceType.SRIOV_VF:
1522 pcinet_info = self._get_pcinet_info(device, net_devs)
1523 if pcinet_info:
1524 caps['network'] = pcinet_info
1525 # Only attempt to get SR-IOV details if a VF is a netdev
1526 # because there are no use cases for other dev types yet.
1527 sriov_caps = _get_sriov_netdev_details(device_dict, dev)
1528 if sriov_caps:
1529 caps['sriov'] = sriov_caps
1531 vpd_info = _get_vpd_details(device_dict, device, pci_devs)
1532 if vpd_info:
1533 caps['vpd'] = vpd_info
1535 if caps:
1536 return {'capabilities': caps}
1538 return caps
1540 xmlstr = dev.XMLDesc(0)
1541 cfgdev = vconfig.LibvirtConfigNodeDevice()
1542 cfgdev.parse_str(xmlstr)
1544 address = "%04x:%02x:%02x.%1x" % (
1545 cfgdev.pci_capability.domain,
1546 cfgdev.pci_capability.bus,
1547 cfgdev.pci_capability.slot,
1548 cfgdev.pci_capability.function)
1550 device = {
1551 "dev_id": cfgdev.name,
1552 "address": address,
1553 "product_id": "%04x" % cfgdev.pci_capability.product_id,
1554 "vendor_id": "%04x" % cfgdev.pci_capability.vendor_id,
1555 }
1557 device["numa_node"] = cfgdev.pci_capability.numa_node
1559 # required by the database model
1560 device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
1561 device.update(
1562 _get_device_type(cfgdev, address, dev, net_devs, vdpa_devs))
1563 device.update(_get_device_capabilities(device, dev,
1564 pci_devs, net_devs))
1565 device.update(self._get_pf_details(device, address))
1566 return device
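# Illustrative sketch of the assembled result (hypothetical device):
#
#     {'dev_id': 'pci_0000_3b_02_0',
#      'address': '0000:3b:02.0',
#      'product_id': '101e',
#      'vendor_id': '15b3',
#      'numa_node': 0,
#      'label': 'label_15b3_101e',
#      ...}   # plus the type/capabilities/PF details merged in above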
1568 def get_vdpa_nodedev_by_address(
1569 self, pci_address: str,
1570 ) -> vconfig.LibvirtConfigNodeDevice:
1571 """Finds a vDPA device by the parent VF PCI device address.
1573 :param pci_address: Parent PCI device address
1574 :returns: A libvirt nodedev representing the vDPA device
1575 :raises: StopIteration if not found
1576 """
1577 dev_flags = (
1578 libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_VDPA |
1579 libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_PCI_DEV
1580 )
1581 devices = {
1582 dev.name(): dev for dev in
1583 self.list_all_devices(flags=dev_flags)}
1584 vdpa_devs = [
1585 dev for dev in devices.values() if "vdpa" in dev.listCaps()]
1586 pci_info = [
1587 self._get_pcidev_info(name, dev, [], vdpa_devs, []) for name, dev
1588 in devices.items() if "pci" in dev.listCaps()]
1589 parent_dev = next(
1590 dev for dev in pci_info if dev['address'] == pci_address)
1591 vdpa_dev = next(
1592 dev for dev in vdpa_devs if dev.parent() == parent_dev['dev_id'])
1593 xmlstr = vdpa_dev.XMLDesc(0)
1594 cfgdev = vconfig.LibvirtConfigNodeDevice()
1595 cfgdev.parse_str(xmlstr)
1596 return cfgdev
1598 def get_vdpa_device_path(
1599 self, pci_address: str,
1600 ) -> str:
1601 """Finds a vDPA device path by the parent VF PCI device address.
1603 :param pci_address: Parent PCI device address
1604 :returns: Device path as string
1605 :raises: StopIteration if not found
1606 """
1607 nodedev = self.get_vdpa_nodedev_by_address(pci_address)
1608 return nodedev.vdpa_capability.dev_path
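# Hypothetical usage sketch, assuming an initialised Host named 'host':
#
#     path = host.get_vdpa_device_path('0000:06:00.2')
#     # -> e.g. '/dev/vhost-vdpa-0'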
1610 def list_pci_devices(self, flags: int = 0) -> ty.List[str]:
1611 """Lookup pci devices.
1613 :returns: a list of strings, names of the virNodeDevice instances
1614 """
1615 return self._list_devices("pci", flags=flags)
1617 def list_mdev_capable_devices(self, flags: int = 0) -> ty.List[str]:
1618 """Lookup devices supporting mdev capabilities.
1620 :returns: a list of strings, names of the virNodeDevice instances
1621 """
1622 return self._list_devices("mdev_types", flags=flags)
1624 def list_mediated_devices(self, flags: int = 0) -> ty.List[str]:
1625 """Lookup mediated devices.
1627 :returns: a list of strings, names of the virNodeDevice instances
1628 """
1629 return self._list_devices("mdev", flags=flags)
1631 def _list_devices(self, cap, flags: int = 0) -> ty.List[str]:
1632 """Lookup devices.
1634 :returns: a list of strings, names of the virNodeDevice instances
1635 """
1636 try:
1637 return self.get_connection().listDevices(cap, flags)
1638 except libvirt.libvirtError as ex:
1639 error_code = ex.get_error_code()
1640 if error_code == libvirt.VIR_ERR_NO_SUPPORT:
1641 LOG.warning("URI %(uri)s does not support "
1642 "listDevices: %(error)s",
1643 {'uri': self._uri, 'error': ex})
1644 return []
1645 else:
1646 raise
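# The capability-specific helpers above all funnel through this method,
# so e.g. list_mediated_devices(flags) is equivalent to (sketch):
#
#     names = self._list_devices('mdev', flags=flags)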
1648 def list_all_devices(
1649 self, flags: int = 0,
1650 ) -> ty.List['libvirt.virNodeDevice']:
1651 """Lookup devices.
1653 :param flags: a bitmask of flags to filter the returned devices.
1654 :returns: a list of virNodeDevice instances.
1655 """
1656 try:
1657 alldevs = [
1658 self._wrap_libvirt_proxy(dev)
1659 for dev in self.get_connection().listAllDevices(flags)] or []
1660 return alldevs
1661 except libvirt.libvirtError as ex:
1662 LOG.warning(ex)
1663 return []
1665 def compare_cpu(self, xmlDesc, flags=0):
1666 """Compares the given CPU description with the host CPU."""
1667 return self.get_connection().compareCPU(xmlDesc, flags)
1669 def compare_hypervisor_cpu(self, xmlDesc, flags=0):
1670 """Compares the given CPU description with the CPU provided by
1671 the host hypervisor. This is different from the older method,
1672 compare_cpu(), which compares a given CPU definition with the
1673 host CPU without considering the abilities of the host
1674 hypervisor. Except for @xmlDesc, all other parameters to the
1675 compareHypervisorCPU API are optional (libvirt will choose
1676 sensible defaults).
1677 """
1678 emulator = None
1679 arch = None
1680 machine = None
1681 virttype = None
1682 return self.get_connection().compareHypervisorCPU(
1683 emulator, arch, machine, virttype, xmlDesc, flags)
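# Minimal usage sketch (hypothetical CPU XML); the underlying libvirt
# API returns one of VIR_CPU_COMPARE_INCOMPATIBLE/_IDENTICAL/_SUPERSET:
#
#     xml = '<cpu><arch>x86_64</arch><model>Skylake-Client</model></cpu>'
#     result = host.compare_hypervisor_cpu(xml)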
1685 def is_cpu_control_policy_capable(self):
1686 """Returns whether kernel configuration CGROUP_SCHED is enabled
1688 CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
1689 improve scheduler latency.
1690 """
1691 return self._has_cgroupsv1_cpu_controller() or \
1692 self._has_cgroupsv2_cpu_controller()
1694 def _has_cgroupsv1_cpu_controller(self):
1695 LOG.debug(f"Searching host: '{self.get_hostname()}' "
1696 "for CPU controller through CGroups V1...")
1697 try:
1698 with open("/proc/self/mounts", "r") as fd:
1699 for line in fd.readlines():
1700 # take the mount options (4th field) and split them
1701 bits = line.split()[3].split(",")
1702 if "cpu" in bits:
1703 LOG.debug("CPU controller found on host.")
1704 return True
1705 LOG.debug("CPU controller missing on host.")
1706 return False
1707 except IOError as ex:
1708 LOG.debug(f"Search failed due to: '{ex}'. "
1709 "Maybe the host is not running under CGroups V1. "
1710 "Deemed host to be missing controller by this approach.")
1711 return False
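# For reference, a /proc/self/mounts entry that satisfies this check;
# the 4th field holds the comma-separated mount options being split:
#
#     cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,relatime,cpu,cpuacct 0 0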
1713 def _has_cgroupsv2_cpu_controller(self):
1714 LOG.debug(f"Searching host: '{self.get_hostname()}' "
1715 "for CPU controller through CGroups V2...")
1716 try:
1717 with open("/sys/fs/cgroup/cgroup.controllers", "r") as fd:
1718 for line in fd.readlines():
1719 bits = line.split()
1720 if "cpu" in bits:
1721 LOG.debug("CPU controller found on host.")
1722 return True
1723 LOG.debug("CPU controller missing on host.")
1724 return False
1725 except IOError as ex:
1726 LOG.debug(f"Search failed due to: '{ex}'. "
1727 "Maybe the host is not running under CGroups V2. "
1728 "Deemed host to be missing controller by this approach.")
1729 return False
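# For reference, cgroup.controllers is a single space-separated line;
# e.g. the following content satisfies this check:
#
#     cpuset cpu io memory hugetlb pids rdma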
1731 def get_canonical_machine_type(self, arch, machine) -> str:
1732 """Resolve a machine type to its canonical representation.
1734 Libvirt supports machine type aliases. On an x86 host the 'pc' machine
1735 type is an alias for e.g. 'pc-i440fx-5.1'. Resolve the provided machine
1736 type to its canonical representation so that it can be used for other
1737 operations.
1739 :param arch: The guest arch.
1740 :param machine: The guest machine type.
1741 :returns: The canonical machine type.
1742 :raises: exception.InternalError if the machine type cannot be resolved
1743 to its canonical representation.
1744 """
1745 for guest in self.get_capabilities().guests:
1746 if guest.arch != arch:
1747 continue
1749 for domain in guest.domains:
1750 if machine in guest.domains[domain].machines:
1751 return machine
1753 if machine in guest.domains[domain].aliases:
1754 return guest.domains[domain].aliases[machine]['canonical']
1756 msg = _('Invalid machine type: %s')
1757 raise exception.InternalError(msg % machine)
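# Illustrative behaviour (alias targets vary with the QEMU version):
#
#     host.get_canonical_machine_type('x86_64', 'pc')
#     # -> e.g. 'pc-i440fx-5.1'; a name that is already canonical is
#     # returned unchanged.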
1759 def _check_file_backed_memory_support(self):
1760 if not CONF.libvirt.file_backed_memory:
1761 return False
1763 # file_backed_memory is only compatible with the qemu and kvm virt types
1764 if CONF.libvirt.virt_type not in ("qemu", "kvm"): 1764 ↛ 1765line 1764 didn't jump to line 1765 because the condition on line 1764 was never true
1765 raise exception.InternalError(
1766 _('Running Nova with file_backed_memory and virt_type '
1767 '%(type)s is not supported. file_backed_memory is only '
1768 'supported with qemu and kvm types.') %
1769 {'type': CONF.libvirt.virt_type})
1771 # file-backed memory doesn't work with memory overcommit.
1772 # Block service startup if file-backed memory is enabled and
1773 # ram_allocation_ratio is not 1.0
1774 if CONF.ram_allocation_ratio != 1.0:
1775 raise exception.InternalError(
1776 'Running Nova with file_backed_memory requires '
1777 'ram_allocation_ratio configured to 1.0')
1779 if CONF.reserved_host_memory_mb: 1779 ↛ 1797line 1779 didn't jump to line 1797 because the condition on line 1779 was always true
1780 # this is a hard failure as placement won't allow total < reserved
1781 if CONF.reserved_host_memory_mb >= CONF.libvirt.file_backed_memory:
1782 msg = _(
1783 "'[libvirt] file_backed_memory', which represents total "
1784 "memory reported to placement, must be greater than "
1785 "reserved memory configured via '[DEFAULT] "
1786 "reserved_host_memory_mb'"
1787 )
1788 raise exception.InternalError(msg)
1790 # TODO(stephenfin): Change this to an exception in W or later
1791 LOG.warning(
1792 "Reserving memory via '[DEFAULT] reserved_host_memory_mb' "
1793 "is not compatible with file-backed memory. Consider "
1794 "setting '[DEFAULT] reserved_host_memory_mb' to 0. This will "
1795 "be an error in a future release."
1796 )
1797 return True
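# A nova.conf sketch that passes all of the checks above (the size, in
# MiB, is illustrative):
#
#     [DEFAULT]
#     ram_allocation_ratio = 1.0
#     reserved_host_memory_mb = 0
#
#     [libvirt]
#     virt_type = kvm
#     file_backed_memory = 1048576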
1799 @property
1800 def has_hyperthreading(self) -> bool:
1801 """Determine if host CPU has SMT, a.k.a. HyperThreading.
1803 :return: True if the host has SMT enabled, else False.
1804 """
1805 if self._has_hyperthreading is not None:
1806 return self._has_hyperthreading
1808 self._has_hyperthreading = False
1810 # we don't use '/capabilities/host/cpu/topology' since libvirt doesn't
1811 # guarantee the accuracy of this information
1812 for cell in self.get_capabilities().host.topology.cells:
1813 if any(len(cpu.siblings) > 1 for cpu in cell.cpus if cpu.siblings):
1814 self._has_hyperthreading = True
1815 break
1817 return self._has_hyperthreading
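# For reference: a cell CPU reporting siblings such as (0, 8) - two
# hardware threads on one core - trips the check above, while a CPU
# whose only sibling is itself does not.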
1819 @property
1820 def supports_uefi(self) -> bool:
1821 """Determine if the host supports UEFI bootloaders for guests.
1823 This checks whether the feature is supported by *any* machine type.
1824 This is only used for trait-reporting purposes and a machine
1825 type-specific check should be used when creating guests.
1826 """
1828 if self._supports_uefi is not None:
1829 return self._supports_uefi
1831 # we only check the host architecture since nova doesn't support
1832 # non-host architectures currently
1833 arch = self.get_capabilities().host.cpu.arch
1834 domain_caps = self.get_domain_capabilities()
1835 for machine_type in domain_caps[arch]:
1836 LOG.debug("Checking UEFI support for host arch (%s)", arch)
1837 _domain_caps = domain_caps[arch][machine_type]
1838 if _domain_caps.os.uefi_supported:
1839 LOG.info('UEFI support detected')
1840 self._supports_uefi = True
1841 return True
1843 LOG.debug('No UEFI support detected')
1844 self._supports_uefi = False
1845 return False
1847 @property
1848 def supports_virtio_fs(self) -> bool:
1849 return self.has_min_version(
1850 lv_ver=driver.MIN_LIBVIRT_VERSION,
1851 hv_ver=driver.MIN_QEMU_VERSION)
1853 @property
1854 def supports_mem_backing_file(self) -> bool:
1855 return self._check_file_backed_memory_support()
1857 @property
1858 def supports_secure_boot(self) -> bool:
1859 """Determine if the host supports UEFI Secure Boot for guests.
1861 This checks whether the feature is supported by *any* machine type.
1862 This is only used for trait-reporting purposes and a machine
1863 type-specific check should be used when creating guests.
1864 """
1866 if self._supports_secure_boot is not None:
1867 return self._supports_secure_boot
1869 # we only check the host architecture since the libvirt driver doesn't
1870 # truly support non-host architectures currently
1871 arch = self.get_capabilities().host.cpu.arch
1872 domain_caps = self.get_domain_capabilities()
1873 for machine_type in domain_caps[arch]:
1874 LOG.debug(
1875 "Checking secure boot support for host arch (%s)",
1876 arch,
1877 )
1878 _domain_caps = domain_caps[arch][machine_type]
1879 if _domain_caps.os.secure_boot_supported:
1880 LOG.info('Secure Boot support detected')
1881 self._supports_secure_boot = True
1882 return True
1884 LOG.debug('No Secure Boot support detected')
1885 self._supports_secure_boot = False
1886 return False
1888 @property
1889 def supports_vtpm(self) -> ty.Optional[bool]:
1890 # we only check the host architecture and the first machine type
1891 # because vtpm support does not vary across machine types
1892 arch = self.get_capabilities().host.cpu.arch
1893 domain_caps = self.get_domain_capabilities()
1894 for machine_type in domain_caps[arch]: 1894 ↛ 1901line 1894 didn't jump to line 1901 because the loop on line 1894 didn't complete
1895 _tpm = domain_caps[arch][machine_type].devices.tpm
1896 # TODO(tkajinam): Remove this once libvirt >= 8.0.0 is required
1897 if _tpm is None:
1898 return None
1899 return (_tpm.supported and 'emulator' in _tpm.backend_models)
1900 # safe guard
1901 return False
1903 @property
1904 def tpm_versions(self) -> ty.Optional[ty.List[str]]:
1905 # we only check the host architecture and the first machine type
1906 # because vtpm support does not vary across machine types
1907 arch = self.get_capabilities().host.cpu.arch
1908 domain_caps = self.get_domain_capabilities()
1909 for machine_type in domain_caps[arch]: 1909 ↛ 1919line 1909 didn't jump to line 1919 because the loop on line 1909 didn't complete
1910 _tpm = domain_caps[arch][machine_type].devices.tpm
1911 # TODO(tkajinam): Remove first check once libvirt >= 8.0.0 is
1912 # required
1913 # TODO(tkajinam): Remove second check once libvirt >= 8.6.0 is
1914 # required
1915 if _tpm is None or _tpm.backend_versions is None:
1916 return None
1917 return _tpm.backend_versions
1918 # safe guard
1919 return []
1921 @property
1922 def tpm_models(self) -> ty.Optional[ty.List[str]]:
1923 # we only check the host architecture and the first machine type
1924 # because vtpm support does not vary across machine types
1925 arch = self.get_capabilities().host.cpu.arch
1926 domain_caps = self.get_domain_capabilities()
1927 for machine_type in domain_caps[arch]: 1927 ↛ 1937line 1927 didn't jump to line 1937 because the loop on line 1927 didn't complete
1928 _tpm = domain_caps[arch][machine_type].devices.tpm
1929 # TODO(tkajinam): Remove first check once libvirt >= 8.0.0 is
1930 # required
1931 # TODO(tkajinam): Remove second check once libvirt >= 8.6.0 is
1932 # required
1933 if _tpm is None or _tpm.models is None:
1934 return None
1935 return _tpm.models
1936 # safe guard
1937 return []
1939 def _kernel_supports_amd_sev(self) -> bool:
1940 if not os.path.exists(SEV_KERNEL_PARAM_FILE):
1941 LOG.debug("%s does not exist", SEV_KERNEL_PARAM_FILE)
1942 return False
1944 with open(SEV_KERNEL_PARAM_FILE) as f:
1945 content = f.read()
1946 LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, content)
1947 return strutils.bool_from_string(content)
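# For reference, the parameter file holds a single boolean-ish token;
# older kernels expose '1'/'0' while newer ones expose 'Y'/'N', both of
# which strutils.bool_from_string() understands:
#
#     $ cat /sys/module/kvm_amd/parameters/sev
#     Y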
1949 @property
1950 def supports_amd_sev(self) -> bool:
1951 """Determine if the host supports AMD SEV for guests.
1953 Returns a boolean indicating whether AMD SEV (Secure Encrypted
1954 Virtualization) is supported. This is conditional on support
1955 in the hardware, kernel, qemu, and libvirt.
1957 This checks whether the feature is supported by *any* machine type.
1958 This is only used for trait-reporting purposes and a machine
1959 type-specific check should be used when creating guests.
1960 """
1961 if self._supports_amd_sev is not None:
1962 return self._supports_amd_sev
1964 self._supports_amd_sev = False
1966 caps = self.get_capabilities()
1967 if caps.host.cpu.arch != fields.Architecture.X86_64:
1968 return self._supports_amd_sev
1970 if not self._kernel_supports_amd_sev():
1971 LOG.info("kernel doesn't support AMD SEV")
1972 return self._supports_amd_sev
1974 domain_caps = self.get_domain_capabilities()
1975 for arch in domain_caps:
1976 for machine_type in domain_caps[arch]:
1977 LOG.debug("Checking SEV support for arch %s "
1978 "and machine type %s", arch, machine_type)
1979 for feature in domain_caps[arch][machine_type].features:
1980 feature_is_sev = isinstance(
1981 feature, vconfig.LibvirtConfigDomainCapsFeatureSev)
1982 if feature_is_sev and feature.supported:
1983 LOG.info("AMD SEV support detected")
1984 self._supports_amd_sev = True
1985 self._max_sev_guests = feature.max_guests
1986 self._max_sev_es_guests = feature.max_es_guests
1987 return self._supports_amd_sev
1989 LOG.debug("No AMD SEV support detected for any (arch, machine_type)")
1990 return self._supports_amd_sev
1992 @property
1993 def max_sev_guests(self) -> ty.Optional[int]:
1994 """Determine maximum number of guests with AMD SEV.
1995 """
1996 if not self.supports_amd_sev: 1996 ↛ 1997line 1996 didn't jump to line 1997 because the condition on line 1996 was never true
1997 return None
1998 return self._max_sev_guests
2000 @property
2001 def max_sev_es_guests(self) -> ty.Optional[int]:
2002 """Determine maximum number of guests with AMD SEV-ES.
2003 """
2004 if not self.supports_amd_sev:
2005 return None
2006 return self._max_sev_es_guests
2008 @property
2009 def supports_remote_managed_ports(self) -> bool:
2010 """Determine if the host supports remote managed ports.
2012 Returns a boolean indicating whether remote managed ports are
2013 possible to use on this host.
2015 The check is based on a Libvirt version which added support for
2016 parsing and exposing PCI VPD, including the card serial number (if
2017 present in the VPD), since the use of remote managed ports depends on this.
2018 https://libvirt.org/news.html#v7-9-0-2021-11-01
2020 The actual presence of a card serial number for a particular device
2021 is meant to be checked elsewhere.
2022 """
2023 return self.has_min_version(lv_ver=(7, 9, 0))
2025 @property
2026 def loaders(self) -> ty.List[dict]:
2027 """Retrieve details of loader configuration for the host.
2029 Inspect the firmware metadata files provided by QEMU [1] to retrieve
2030 information about the firmware supported by this host. Note that most
2031 distros only publish this information for UEFI loaders currently.
2033 This should be removed when libvirt correctly supports switching
2034 between loaders with or without secure boot enabled [2].
2036 [1] https://github.com/qemu/qemu/blob/v5.2.0/docs/interop/firmware.json
2037 [2] https://bugzilla.redhat.com/show_bug.cgi?id=1906500
2039 :returns: An ordered list of loader configuration dictionaries.
2040 """
2041 if self._loaders is not None:
2042 return self._loaders
2044 self._loaders = _get_loaders()
2045 return self._loaders
2047 def get_loader(
2048 self,
2049 arch: str,
2050 machine: str,
2051 has_secure_boot: bool,
2052 ) -> ty.Tuple[str, str, bool]:
2053 """Get loader for the specified architecture and machine type.
2055 :returns: A tuple of the bootloader executable path, the NVRAM
2056 template path, and a bool indicating whether SMM must be enabled.
2057 """
2059 machine = self.get_canonical_machine_type(arch, machine)
2061 for loader in self.loaders:
2062 for target in loader['targets']:
2063 if arch != target['architecture']:
2064 continue
2066 for machine_glob in target['machines']:
2067 # the 'machines' attribute supports glob patterns (e.g.
2068 # 'pc-q35-*') so we need to resolve these
2069 if fnmatch.fnmatch(machine, machine_glob):
2070 break
2071 else:
2072 continue
2074 # if we've got this far, we have a match on the target
2075 break
2076 else:
2077 continue
2079 # if we request secure boot then we should get it and vice versa
2080 if has_secure_boot != ('secure-boot' in loader['features']):
2081 continue
2083 return (
2084 loader['mapping']['executable']['filename'],
2085 loader['mapping']['nvram-template']['filename'],
2086 'requires-smm' in loader['features'],
2087 )
2089 raise exception.UEFINotSupported()
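# A trimmed firmware-descriptor sketch of the shape matched above
# (hypothetical paths; the real JSON files live under the
# QEMU_FIRMWARE_DESCRIPTOR_PATHS directories):
#
#     {"mapping": {
#          "executable": {"filename": "/usr/share/OVMF/OVMF_CODE.secboot.fd"},
#          "nvram-template": {"filename": "/usr/share/OVMF/OVMF_VARS.fd"}},
#      "targets": [{"architecture": "x86_64", "machines": ["pc-q35-*"]}],
#      "features": ["secure-boot", "requires-smm"]}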