Coverage for nova/virt/fake.py: 74%
435 statements
1# Copyright 2010 United States Government as represented by the
2# Administrator of the National Aeronautics and Space Administration.
3# All Rights Reserved.
4# Copyright (c) 2010 Citrix Systems, Inc.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
18"""
19A fake (in-memory) hypervisor+api.
21Allows nova testing w/o a hypervisor. This module also documents the
22semantics of real hypervisor connections.
24"""
26import collections
27import contextlib
28import time
29import uuid
31import fixtures
32import os_resource_classes as orc
33from oslo_log import log as logging
34from oslo_serialization import jsonutils
35from oslo_utils.fixture import uuidsentinel as uuids
36from oslo_utils import versionutils
38from nova.compute import power_state
39from nova.compute import task_states
40from nova.compute import vm_states
41import nova.conf
42from nova.console import type as ctype
43from nova import context as nova_context
44from nova import exception
45from nova import objects
46from nova.objects import diagnostics as diagnostics_obj
47from nova.objects import fields as obj_fields
48from nova.objects import migrate_data
49from nova.virt import driver
50from nova.virt import hardware
51from nova.virt.ironic import driver as ironic
52import nova.virt.node
53from nova.virt import virtapi
55CONF = nova.conf.CONF
57LOG = logging.getLogger(__name__)
60class FakeInstance(object):
62 def __init__(self, name, state, uuid):
63 self.name = name
64 self.state = state
65 self.uuid = uuid
67 def __getitem__(self, key):
68 return getattr(self, key)
71class Resources(object):
72 vcpus = 0
73 memory_mb = 0
74 local_gb = 0
75 vcpus_used = 0
76 memory_mb_used = 0
77 local_gb_used = 0
79 def __init__(self, vcpus=8, memory_mb=8000, local_gb=500):
80 self.vcpus = vcpus
81 self.memory_mb = memory_mb
82 self.local_gb = local_gb
84 def claim(self, vcpus=0, mem=0, disk=0):
85 self.vcpus_used += vcpus
86 self.memory_mb_used += mem
87 self.local_gb_used += disk
89 def release(self, vcpus=0, mem=0, disk=0):
90 self.vcpus_used -= vcpus
91 self.memory_mb_used -= mem
92 self.local_gb_used -= disk
94 def dump(self):
95 return {
96 'vcpus': self.vcpus,
97 'memory_mb': self.memory_mb,
98 'local_gb': self.local_gb,
99 'vcpus_used': self.vcpus_used,
100 'memory_mb_used': self.memory_mb_used,
101 'local_gb_used': self.local_gb_used
102 }
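# NOTE: illustrative sketch, not part of the original module. It shows the
# simple claim/release bookkeeping done by the Resources class above; the
# numbers are arbitrary example values.
def _example_resources_accounting():
    resources = Resources(vcpus=8, memory_mb=8000, local_gb=500)
    # claim() only increments the *_used counters; the totals stay fixed.
    resources.claim(vcpus=2, mem=2048, disk=20)
    assert resources.dump()['vcpus_used'] == 2
    # release() undoes a matching claim.
    resources.release(vcpus=2, mem=2048, disk=20)
    assert resources.dump()['vcpus_used'] == 0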
105class FakeDriver(driver.ComputeDriver):
106 # These must match the traits in
107 # nova.tests.functional.integrated_helpers.ProviderUsageBaseTestCase
108 capabilities = {
109 "has_imagecache": True,
110 "supports_evacuate": True,
111 "supports_migrate_to_same_host": False,
112 "supports_attach_interface": True,
113 "supports_device_tagging": True,
114 "supports_tagged_attach_interface": True,
115 "supports_tagged_attach_volume": True,
116 "supports_extend_volume": True,
117 "supports_multiattach": True,
118 "supports_trusted_certs": True,
119 "supports_pcpus": False,
120 "supports_accelerators": True,
121 "supports_remote_managed_ports": True,
122 "supports_address_space_passthrough": True,
123 "supports_address_space_emulated": True,
124 "supports_stateless_firmware": True,
125 "supports_virtio_fs": True,
126 "supports_mem_backing_file": True,
128 # Supported image types
129 "supports_image_type_raw": True,
130 "supports_image_type_vhd": False,
131 }
133 # Since we don't have a real hypervisor, pretend we have lots of
134 # disk and ram so this driver can be used to test large instances.
135 vcpus = 1000
136 memory_mb = 800000
137 local_gb = 600000
139 """Fake hypervisor driver."""
141 def __init__(self, virtapi, read_only=False):
142 super(FakeDriver, self).__init__(virtapi)
143 self.instances = {}
144 self.resources = Resources(
145 vcpus=self.vcpus,
146 memory_mb=self.memory_mb,
147 local_gb=self.local_gb)
148 self.host_status_base = {
149 'hypervisor_type': 'fake',
150 'hypervisor_version': versionutils.convert_version_to_int('1.0'),
151 'hypervisor_hostname': CONF.host,
152 'cpu_info': {},
153 'disk_available_least': 0,
154 'supported_instances': [(
155 obj_fields.Architecture.X86_64,
156 obj_fields.HVType.FAKE,
157 obj_fields.VMMode.HVM)],
158 'numa_topology': None,
159 }
160 self._mounts = {}
161 self._interfaces = {}
162 self.active_migrations = {}
163 self._host = None
164 self._nodes = None
166 def init_host(self, host):
167 self._host = host
168         # NOTE(gibi): this is unnecessarily complex and fragile, but this is
169         # how many current functional sample tests expect the node name.
170 self._set_nodes(['fake-mini'] if self._host == 'compute'
171 else [self._host])
173 def _set_nodes(self, nodes):
174         # NOTE(gibi): this is not part of the driver interface but is used
175         # by our tests to customize the nodes discovered by the fake
176         # driver.
177 self._nodes = nodes
179 def list_instances(self):
180 return [self.instances[uuid].name for uuid in self.instances.keys()]
182 def list_instance_uuids(self):
183 return list(self.instances.keys())
185 def plug_vifs(self, instance, network_info):
186 """Plug VIFs into networks."""
187 pass
189 def unplug_vifs(self, instance, network_info):
190 """Unplug VIFs from networks."""
191 pass
193 def spawn(self, context, instance, image_meta, injected_files,
194 admin_password, allocations, network_info=None,
195 block_device_info=None, power_on=True, accel_info=None):
197 if network_info:
198 for vif in network_info:
199 # simulate a real driver triggering the async network
200 # allocation as it might cause an error
201 vif.fixed_ips()
202 # store the vif as attached so we can allow detaching it later
203 # with a detach_interface() call.
204 self._interfaces[vif['id']] = vif
206 uuid = instance.uuid
207 state = power_state.RUNNING if power_on else power_state.SHUTDOWN
208 flavor = instance.flavor
209 self.resources.claim(
210 vcpus=flavor.vcpus,
211 mem=flavor.memory_mb,
212 disk=flavor.root_gb)
213 fake_instance = FakeInstance(instance.name, state, uuid)
214 self.instances[uuid] = fake_instance
216 def snapshot(self, context, instance, image_id, update_task_state):
217 if instance.uuid not in self.instances:
218 raise exception.InstanceNotRunning(instance_id=instance.uuid)
219 update_task_state(task_state=task_states.IMAGE_UPLOADING)
221 def reboot(self, context, instance, network_info, reboot_type,
222 block_device_info=None, bad_volumes_callback=None,
223 accel_info=None, share_info=None):
224 # If the guest is not on the hypervisor and we're doing a hard reboot
225 # then mimic the libvirt driver by spawning the guest.
226         if (instance.uuid not in self.instances and
227 reboot_type.lower() == 'hard'):
228 injected_files = admin_password = allocations = None
229 self.spawn(context, instance, instance.image_meta, injected_files,
230 admin_password, allocations,
231 block_device_info=block_device_info)
232 else:
233 # Just try to power on the guest.
234 self.power_on(context, instance, network_info,
235 block_device_info=block_device_info)
237 def get_host_ip_addr(self):
238 return '192.168.0.1'
240 def set_admin_password(self, instance, new_pass):
241 pass
243 def resume_state_on_host_boot(self, context, instance, network_info,
244 share_info, block_device_info=None):
245 pass
247 def rescue(self, context, instance, network_info, image_meta,
248 rescue_password, block_device_info, share_info):
249 pass
251 def unrescue(
252 self,
253 context: nova_context.RequestContext,
254 instance: 'objects.Instance',
255 ):
256 self.instances[instance.uuid].state = power_state.RUNNING
258 def poll_rebooting_instances(self, timeout, instances):
259 pass
261 def migrate_disk_and_power_off(self, context, instance, dest,
262 flavor, network_info,
263 block_device_info=None,
264 timeout=0, retry_interval=0):
265 pass
267 def finish_revert_migration(self, context, instance, network_info,
268 migration, block_device_info=None,
269 power_on=True):
270 state = power_state.RUNNING if power_on else power_state.SHUTDOWN
271 self.instances[instance.uuid] = FakeInstance(
272 instance.name, state, instance.uuid)
274 def post_live_migration_at_destination(self, context, instance,
275 network_info,
276 block_migration=False,
277 block_device_info=None):
278 # Called from the destination host after a successful live migration
279 # so spawn the instance on this host to track it properly.
280 image_meta = injected_files = admin_password = allocations = None
281 self.spawn(context, instance, image_meta, injected_files,
282 admin_password, allocations)
284 def power_off(self, instance, timeout=0, retry_interval=0):
285 if instance.uuid in self.instances:
286 self.instances[instance.uuid].state = power_state.SHUTDOWN
287 else:
288 raise exception.InstanceNotFound(instance_id=instance.uuid)
290 def power_on(self, context, instance, network_info,
291 block_device_info=None, accel_info=None, share_info=None):
292         if instance.uuid in self.instances:
293 self.instances[instance.uuid].state = power_state.RUNNING
294             if share_info:
295 for share in share_info:
296 share.activate()
297 else:
298 raise exception.InstanceNotFound(instance_id=instance.uuid)
300 def mount_share(self, context, instance, share_mapping):
301 pass
303 def umount_share(self, context, instance, share_mapping):
304 pass
306 def trigger_crash_dump(self, instance):
307 pass
309 def soft_delete(self, instance):
310 pass
312 def restore(self, instance):
313 pass
315 def pause(self, instance):
316 pass
318 def unpause(self, instance):
319 pass
321 def suspend(self, context, instance):
322 pass
324 def resume(
325 self,
326 context,
327 instance,
328 network_info,
329 block_device_info=None,
330 share_info=None
331 ):
332 pass
334 def destroy(self, context, instance, network_info, block_device_info=None,
335 destroy_disks=True, destroy_secrets=True):
336 key = instance.uuid
337 if key in self.instances:
338 flavor = instance.flavor
339 self.resources.release(
340 vcpus=flavor.vcpus,
341 mem=flavor.memory_mb,
342 disk=flavor.root_gb)
343 del self.instances[key]
344 else:
345 LOG.warning("Key '%(key)s' not in instances '%(inst)s'",
346 {'key': key,
347 'inst': self.instances}, instance=instance)
349 def cleanup(self, context, instance, network_info, block_device_info=None,
350 destroy_disks=True, migrate_data=None, destroy_vifs=True,
351 destroy_secrets=True):
352 # cleanup() should not be called when the guest has not been destroyed.
353         if instance.uuid in self.instances:
354 raise exception.InstanceExists(
355 "Instance %s has not been destroyed." % instance.uuid)
357 def attach_volume(self, context, connection_info, instance, mountpoint,
358 disk_bus=None, device_type=None, encryption=None):
359 """Attach the disk to the instance at mountpoint using info."""
360 instance_name = instance.name
361         if instance_name not in self._mounts:
362 self._mounts[instance_name] = {}
363 self._mounts[instance_name][mountpoint] = connection_info
365 def detach_volume(self, context, connection_info, instance, mountpoint,
366 encryption=None):
367 """Detach the disk attached to the instance."""
368 try:
369 del self._mounts[instance.name][mountpoint]
370 except KeyError:
371 pass
373 def swap_volume(self, context, old_connection_info, new_connection_info,
374 instance, mountpoint, resize_to):
375 """Replace the disk attached to the instance."""
376 instance_name = instance.name
377 if instance_name not in self._mounts:
378 self._mounts[instance_name] = {}
379 self._mounts[instance_name][mountpoint] = new_connection_info
381 def extend_volume(self, context, connection_info, instance,
382 requested_size):
383 """Extend the disk attached to the instance."""
384 pass
386 def attach_interface(self, context, instance, image_meta, vif):
387         if vif['id'] in self._interfaces:
388 raise exception.InterfaceAttachFailed(
389 instance_uuid=instance.uuid)
390 self._interfaces[vif['id']] = vif
392 def detach_interface(self, context, instance, vif):
393 try:
394 del self._interfaces[vif['id']]
395 except KeyError:
396 raise exception.InterfaceDetachFailed(
397 instance_uuid=instance.uuid)
399 def get_info(self, instance, use_cache=True):
400 if instance.uuid not in self.instances:
401 raise exception.InstanceNotFound(instance_id=instance.uuid)
402 i = self.instances[instance.uuid]
403 return hardware.InstanceInfo(state=i.state)
405 def get_diagnostics(self, instance):
406 return {'cpu0_time': 17300000000,
407 'memory': 524288,
408 'vda_errors': -1,
409 'vda_read': 262144,
410 'vda_read_req': 112,
411 'vda_write': 5778432,
412 'vda_write_req': 488,
413 'vnet1_rx': 2070139,
414 'vnet1_rx_drop': 0,
415 'vnet1_rx_errors': 0,
416 'vnet1_rx_packets': 26701,
417 'vnet1_tx': 140208,
418 'vnet1_tx_drop': 0,
419 'vnet1_tx_errors': 0,
420 'vnet1_tx_packets': 662,
421 }
423 def get_instance_diagnostics(self, instance):
424 diags = diagnostics_obj.Diagnostics(
425 state='running', driver='libvirt', hypervisor='kvm',
426 hypervisor_os='ubuntu', uptime=46664, config_drive=True)
427 diags.add_cpu(id=0, time=17300000000, utilisation=15)
428 diags.add_nic(mac_address='01:23:45:67:89:ab',
429 rx_octets=2070139,
430 rx_errors=100,
431 rx_drop=200,
432 rx_packets=26701,
433 rx_rate=300,
434 tx_octets=140208,
435 tx_errors=400,
436 tx_drop=500,
437                       tx_packets=662,
438 tx_rate=600)
439 diags.add_disk(read_bytes=262144,
440 read_requests=112,
441 write_bytes=5778432,
442 write_requests=488,
443 errors_count=1)
444 diags.memory_details = diagnostics_obj.MemoryDiagnostics(
445 maximum=524288, used=0)
446 return diags
448 def get_all_volume_usage(self, context, compute_host_bdms):
449 """Return usage info for volumes attached to vms on
450 a given host.
451 """
452 volusage = []
453 if compute_host_bdms:
454 volusage = [{'volume': compute_host_bdms[0][
455 'instance_bdms'][0]['volume_id'],
456 'instance': compute_host_bdms[0]['instance'],
457 'rd_bytes': 0,
458 'rd_req': 0,
459 'wr_bytes': 0,
460 'wr_req': 0}]
462 return volusage
464 def get_host_cpu_stats(self):
465 stats = {'kernel': 5664160000000,
466 'idle': 1592705190000000,
467 'user': 26728850000000,
468 'iowait': 6121490000000}
469 stats['frequency'] = 800
470 return stats
472 def block_stats(self, instance, disk_id):
473 return [0, 0, 0, 0, None]
475 def get_console_output(self, context, instance):
476 return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'
478 def get_vnc_console(self, context, instance):
479 return ctype.ConsoleVNC(
480 internal_access_path=uuids.vnc_access_path,
481 host='fakevncconsole.com',
482 port=6969)
484 def get_spice_console(self, context, instance):
485 return ctype.ConsoleSpice(
486 internal_access_path=uuids.spice_access_path,
487 host='fakespiceconsole.com',
488 port=6969,
489 tlsPort=6970)
491 def get_serial_console(self, context, instance):
492 return ctype.ConsoleSerial(
493 internal_access_path=uuids.serial_access_path,
494 host='fakerdpconsole.com',
495 port=6969)
497 def get_mks_console(self, context, instance):
498 return ctype.ConsoleMKS(
499 internal_access_path=uuids.mks_access_path,
500 host='fakemksconsole.com',
501 port=6969)
503 def get_available_resource(self, nodename):
504         """Updates compute manager resource info in the ComputeNode table.
506 Since we don't have a real hypervisor, pretend we have lots of
507 disk and ram.
508 """
509 cpu_info = collections.OrderedDict([
510 ('arch', 'x86_64'),
511 ('model', 'Nehalem'),
512 ('vendor', 'Intel'),
513 ('features', ['pge', 'clflush']),
514 ('topology', {
515 'cores': 1,
516 'threads': 1,
517 'sockets': 4,
518 }),
519 ])
520         if nodename not in self.get_available_nodes():
521 return {}
523 host_status = self.host_status_base.copy()
524 host_status.update(self.resources.dump())
525 host_status['hypervisor_hostname'] = nodename
526 host_status['host_hostname'] = nodename
527 host_status['host_name_label'] = nodename
528 host_status['cpu_info'] = jsonutils.dumps(cpu_info)
529 # NOTE(danms): Because the fake driver runs on the same host
530 # in tests, potentially with multiple nodes, we need to
531 # control our node uuids. Make sure we return a unique and
532 # consistent uuid for each node we are responsible for to
533 # avoid the persistent local node identity from taking over.
534 host_status['uuid'] = str(getattr(uuids, 'node_%s' % nodename))
535 return host_status
537 def update_provider_tree(self, provider_tree, nodename, allocations=None):
538         # NOTE(yikun): If the inv record does not exist, the allocation_ratio
539 # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
540         # is set, and fall back to the initial_xxx_allocation_ratio
541 # otherwise.
542 inv = provider_tree.data(nodename).inventory
543 ratios = self._get_allocation_ratios(inv)
544 inventory = {
545 'VCPU': {
546 'total': self.vcpus,
547 'min_unit': 1,
548 'max_unit': self.vcpus,
549 'step_size': 1,
550 'allocation_ratio': ratios[orc.VCPU],
551 'reserved': CONF.reserved_host_cpus,
552 },
553 'MEMORY_MB': {
554 'total': self.memory_mb,
555 'min_unit': 1,
556 'max_unit': self.memory_mb,
557 'step_size': 1,
558 'allocation_ratio': ratios[orc.MEMORY_MB],
559 'reserved': CONF.reserved_host_memory_mb,
560 },
561 'DISK_GB': {
562 'total': self.local_gb,
563 'min_unit': 1,
564 'max_unit': self.local_gb,
565 'step_size': 1,
566 'allocation_ratio': ratios[orc.DISK_GB],
567 'reserved': self._get_reserved_host_disk_gb_from_config(),
568 },
569 }
570 provider_tree.update_inventory(nodename, inventory)
572 def get_instance_disk_info(self, instance, block_device_info=None):
573 return
575 def live_migration(self, context, instance, dest,
576 post_method, recover_method, block_migration=False,
577 migrate_data=None):
578 post_method(context, instance, dest, block_migration,
579 migrate_data)
580 return
582 def live_migration_force_complete(self, instance):
583 return
585 def live_migration_abort(self, instance):
586 return
588 def cleanup_live_migration_destination_check(self, context,
589 dest_check_data):
590 return
592 def check_can_live_migrate_destination(self, context, instance,
593 src_compute_info, dst_compute_info,
594 block_migration=False,
595 disk_over_commit=False):
596 data = migrate_data.LibvirtLiveMigrateData()
597 data.filename = 'fake'
598 data.image_type = CONF.libvirt.images_type
599 data.graphics_listen_addr_vnc = CONF.vnc.server_listen
600 data.graphics_listen_addr_spice = CONF.spice.server_listen
601 data.serial_listen_addr = None
602         # NOTE(eliqiao): block_migration and disk_over_commit are not
603 # nullable, so just don't set them if they are None
604 if block_migration is not None:
605 data.block_migration = block_migration
606 if disk_over_commit is not None:
607 data.disk_over_commit = disk_over_commit
608 data.disk_available_mb = 100000
609 data.is_shared_block_storage = True
610 data.is_shared_instance_path = True
612 return data
614 def check_can_live_migrate_source(self, context, instance,
615 dest_check_data, block_device_info=None):
616 return dest_check_data
618 def finish_migration(self, context, migration, instance, disk_info,
619 network_info, image_meta, resize_instance,
620 allocations, block_device_info=None, power_on=True):
621 injected_files = admin_password = None
622 # Finish migration is just like spawning the guest on a destination
623 # host during resize/cold migrate, so reuse the spawn() fake to
624 # claim resources and track the instance on this "hypervisor".
625 self.spawn(context, instance, image_meta, injected_files,
626 admin_password, allocations,
627 block_device_info=block_device_info, power_on=power_on)
629 def confirm_migration(self, context, migration, instance, network_info):
630 # Confirm migration cleans up the guest from the source host so just
631 # destroy the guest to remove it from the list of tracked instances
632 # unless it is a same-host resize.
633         if migration.source_compute != migration.dest_compute:
634 self.destroy(context, instance, network_info)
636 def pre_live_migration(self, context, instance, block_device_info,
637 network_info, disk_info, migrate_data):
638 return migrate_data
640 def rollback_live_migration_at_destination(self, context, instance,
641 network_info,
642 block_device_info,
643 destroy_disks=True,
644 migrate_data=None):
645 return
647 def _test_remove_vm(self, instance_uuid):
648 """Removes the named VM, as if it crashed. For testing."""
649 self.instances.pop(instance_uuid)
651 def host_power_action(self, action):
652 """Reboots, shuts down or powers up the host."""
653 return action
655 def host_maintenance_mode(self, host, mode):
656         """Start/Stop host maintenance window. On start, it triggers
657         evacuation of the guest VMs.
658 """
659 if not mode:
660 return 'off_maintenance'
661 return 'on_maintenance'
663 def set_host_enabled(self, enabled):
664 """Sets the specified host's ability to accept new instances."""
665 if enabled:
666 return 'enabled'
667 return 'disabled'
669 def get_volume_connector(self, instance):
670 return {'ip': CONF.my_block_storage_ip,
671 'initiator': 'fake',
672 'host': self._host}
674 def get_available_nodes(self, refresh=False):
675 return self._nodes
677 def get_nodenames_by_uuid(self, refresh=False):
678 return {str(getattr(uuids, 'node_%s' % n)): n
679 for n in self.get_available_nodes()}
681 def instance_on_disk(self, instance):
682 return False
684 def quiesce(self, context, instance, image_meta):
685 pass
687 def unquiesce(self, context, instance, image_meta):
688 pass
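# NOTE: illustrative sketch, not part of the original module. It walks through
# the purely in-memory lifecycle tracked by FakeDriver above. The tiny flavor
# and instance stand-ins are hypothetical; real callers pass nova
# objects.Instance objects.
def _example_fake_driver_lifecycle():
    class _ExampleFlavor(object):
        vcpus = 1
        memory_mb = 512
        root_gb = 1

    class _ExampleInstance(object):
        uuid = '11111111-2222-3333-4444-555555555555'
        name = 'example-instance'
        flavor = _ExampleFlavor()

    fake_driver = FakeDriver(virtapi=FakeVirtAPI())
    fake_driver.init_host('compute')  # node name becomes 'fake-mini'
    instance = _ExampleInstance()
    # spawn() just claims resources and records the instance in memory.
    fake_driver.spawn(None, instance, None, None, None, None)
    assert fake_driver.list_instance_uuids() == [instance.uuid]
    fake_driver.power_off(instance)
    # destroy() releases the claimed resources and forgets the instance.
    fake_driver.destroy(None, instance, network_info=None)
    assert fake_driver.list_instances() == []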
691class FakeVirtAPI(virtapi.VirtAPI):
692 @contextlib.contextmanager
693 def wait_for_instance_event(self, instance, event_names, deadline=300,
694 error_callback=None):
695 # NOTE(danms): Don't actually wait for any events, just
696 # fall through
697 yield
699 def exit_wait_early(self, events):
700 # We never wait, so there is nothing to exit early
701 pass
703 def update_compute_provider_status(self, context, rp_uuid, enabled):
704 pass
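# NOTE: illustrative sketch, not part of the original module. FakeVirtAPI
# never actually waits, so driver code written against
# wait_for_instance_event() runs straight through in tests; the event name
# below is just an example value.
def _example_virtapi_does_not_wait():
    api = FakeVirtAPI()
    with api.wait_for_instance_event(None, ['network-vif-plugged']):
        pass  # returns immediately, no external event is needed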
707class SmallFakeDriver(FakeDriver):
708     # The API samples expect specific CPU, memory and disk sizes. In order to
709     # allow the fake virt driver to be used outside of the unit tests, provide
710     # a separate class that has the values expected by the API samples. So
711     # instead of requiring new samples every time those
712     # values are adjusted, allow them to be overwritten here; see the sketch below.
714 vcpus = 2
715 memory_mb = 8192
716 local_gb = 1028
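# NOTE: illustrative sketch, not part of the original module. As the comment
# above describes, the advertised sizes are plain class attributes, so a test
# needing a different capacity can simply subclass and override them; the
# class name and values here are hypothetical.
class _ExampleTinyFakeDriver(FakeDriver):
    vcpus = 1
    memory_mb = 256
    local_gb = 10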
719class MediumFakeDriver(FakeDriver):
720     # Fake driver that has enough resources to host more than one instance,
721     # but not so many that they cannot be exhausted easily.
723 vcpus = 10
724 memory_mb = 8192
725 local_gb = 1028
728class SameHostColdMigrateDriver(MediumFakeDriver):
729 """MediumFakeDriver variant that supports same-host cold migrate."""
730 capabilities = dict(FakeDriver.capabilities,
731 supports_migrate_to_same_host=True)
734class RescueBFVDriver(MediumFakeDriver):
735 capabilities = dict(FakeDriver.capabilities, supports_bfv_rescue=True)
738class PowerUpdateFakeDriver(SmallFakeDriver):
739 # A specific fake driver for the power-update external event testing.
741 def __init__(self, virtapi):
742 super(PowerUpdateFakeDriver, self).__init__(virtapi=None)
743 self.driver = ironic.IronicDriver(virtapi=virtapi)
745 def power_update_event(self, instance, target_power_state):
746 """Update power state of the specified instance in the nova DB."""
747 self.driver.power_update_event(instance, target_power_state)
750class MediumFakeDriverWithNestedCustomResources(MediumFakeDriver):
751 # A MediumFakeDriver variant that also reports CUSTOM_MAGIC resources on
752 # a nested resource provider
753 vcpus = 10
754 memory_mb = 8192
755 local_gb = 1028
756 child_resources = {
757 'CUSTOM_MAGIC': {
758 'total': 10,
759 'reserved': 0,
760 'min_unit': 1,
761 'max_unit': 10,
762 'step_size': 1,
763 'allocation_ratio': 1,
764 }
765 }
767 def update_provider_tree(self, provider_tree, nodename, allocations=None):
768 super(
769 MediumFakeDriverWithNestedCustomResources,
770 self).update_provider_tree(
771 provider_tree, nodename,
772 allocations=allocations)
774 if not provider_tree.exists(nodename + '-child'):
775 provider_tree.new_child(name=nodename + '-child',
776 parent=nodename)
778 provider_tree.update_inventory(nodename + '-child',
779 self.child_resources)
782class FakeFinishMigrationFailDriver(FakeDriver):
783 """FakeDriver variant that will raise an exception from finish_migration"""
785 def finish_migration(self, *args, **kwargs):
786 raise exception.VirtualInterfaceCreateException()
789class PredictableNodeUUIDDriver(SmallFakeDriver):
790 """SmallFakeDriver variant that reports a predictable node uuid in
791 get_available_resource, like IronicDriver.
792 """
794 def get_available_resource(self, nodename):
795 resources = super(
796 PredictableNodeUUIDDriver, self).get_available_resource(nodename)
797 # This is used in ComputeNode.update_from_virt_driver which is called
798 # from the ResourceTracker when creating a ComputeNode.
799 resources['uuid'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, nodename))
800 return resources
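# NOTE: illustrative sketch, not part of the original module. uuid5 over a
# fixed namespace and the node name is deterministic, which is what makes the
# node UUID above "predictable" across restarts and calls.
def _example_predictable_uuid():
    assert (uuid.uuid5(uuid.NAMESPACE_DNS, 'node1') ==
            uuid.uuid5(uuid.NAMESPACE_DNS, 'node1'))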
803class FakeRescheduleDriver(FakeDriver):
804 """FakeDriver derivative that triggers a reschedule on the first spawn
805 attempt. This is expected to only be used in tests that have more than
806 one compute service.
807 """
808 # dict, keyed by instance uuid, mapped to a boolean telling us if the
809 # instance has been rescheduled or not
810 rescheduled = {}
812 def spawn(self, context, instance, image_meta, injected_files,
813 admin_password, allocations, network_info=None,
814 block_device_info=None, power_on=True, accel_info=None):
815 if not self.rescheduled.get(instance.uuid, False):
816 # We only reschedule on the first time something hits spawn().
817 self.rescheduled[instance.uuid] = True
818 raise exception.ComputeResourcesUnavailable(
819 reason='FakeRescheduleDriver')
820 super(FakeRescheduleDriver, self).spawn(
821 context, instance, image_meta, injected_files,
822 admin_password, allocations, network_info, block_device_info,
823 power_on)
826class FakeRescheduleDriverWithNestedCustomResources(
827 FakeRescheduleDriver, MediumFakeDriverWithNestedCustomResources):
828 pass
831class FakeBuildAbortDriver(FakeDriver):
832 """FakeDriver derivative that always fails on spawn() with a
833 BuildAbortException so no reschedule is attempted.
834 """
836 def spawn(self, context, instance, image_meta, injected_files,
837 admin_password, allocations, network_info=None,
838 block_device_info=None, power_on=True, accel_info=None):
839 raise exception.BuildAbortException(
840 instance_uuid=instance.uuid, reason='FakeBuildAbortDriver')
843class FakeBuildAbortDriverWithNestedCustomResources(
844 FakeBuildAbortDriver, MediumFakeDriverWithNestedCustomResources):
845 pass
848class FakeUnshelveSpawnFailDriver(FakeDriver):
849 """FakeDriver derivative that always fails on spawn() with a
850 VirtualInterfaceCreateException when unshelving an offloaded instance.
851 """
853 def spawn(self, context, instance, image_meta, injected_files,
854 admin_password, allocations, network_info=None,
855 block_device_info=None, power_on=True, accel_info=None):
856 if instance.vm_state == vm_states.SHELVED_OFFLOADED:
857 raise exception.VirtualInterfaceCreateException(
858 'FakeUnshelveSpawnFailDriver')
859 # Otherwise spawn normally during the initial build.
860 super(FakeUnshelveSpawnFailDriver, self).spawn(
861 context, instance, image_meta, injected_files,
862 admin_password, allocations, network_info, block_device_info,
863 power_on)
866class FakeUnshelveSpawnFailDriverWithNestedCustomResources(
867 FakeUnshelveSpawnFailDriver, MediumFakeDriverWithNestedCustomResources):
868 pass
871class FakeLiveMigrateDriver(FakeDriver):
872 """FakeDriver derivative to handle force_complete and abort calls.
874     This driver serves those tests that need to abort or force-complete
875     a live migration: the live migration will never finish
876     without the force_complete_migration or delete_migration API calls.
878 """
880 def __init__(self, virtapi, read_only=False):
881 super(FakeLiveMigrateDriver, self).__init__(virtapi, read_only)
882 self._migrating = True
883 self._abort_migration = True
885 def live_migration(self, context, instance, dest,
886 post_method, recover_method, block_migration=False,
887 migrate_data=None):
888 self._abort_migration = False
889 self._migrating = True
890 count = 0
891 while self._migrating and count < 50:
892 time.sleep(0.1)
893 count = count + 1
895 if self._abort_migration:
896 recover_method(context, instance, dest, migrate_data,
897 migration_status='cancelled')
898 else:
899 post_method(context, instance, dest, block_migration,
900 migrate_data)
902 def live_migration_force_complete(self, instance):
903 self._migrating = False
904 if instance.uuid in self.instances:
905 del self.instances[instance.uuid]
907 def live_migration_abort(self, instance):
908 self._abort_migration = True
909 self._migrating = False
911 def post_live_migration(self, context, instance, block_device_info,
912 migrate_data=None):
913 # Runs on the source host, called from
914 # ComputeManager._post_live_migration so just delete the instance
915 # from being tracked on the source host.
916 self.destroy(context, instance, network_info=None,
917 block_device_info=block_device_info)
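# NOTE: illustrative sketch, not part of the original module. live_migration()
# above polls for up to ~5 seconds (50 * 0.1s) until either
# live_migration_abort() or live_migration_force_complete() is called, so a
# test typically drives it from a separate thread; the threading here is
# illustrative only.
def _example_force_complete(driver, context, instance, dest,
                            post_method, recover_method):
    import threading
    worker = threading.Thread(
        target=driver.live_migration,
        args=(context, instance, dest, post_method, recover_method))
    worker.start()
    time.sleep(0.2)  # let the migration enter its wait loop
    # force-complete finishes the migration and untracks the instance
    driver.live_migration_force_complete(instance)
    worker.join()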
920class FakeLiveMigrateDriverWithNestedCustomResources(
921 FakeLiveMigrateDriver, MediumFakeDriverWithNestedCustomResources):
922 pass
925class FakeDriverWithPciResources(SmallFakeDriver):
926 """NOTE: this driver provides symmetric compute nodes. Each compute will
927     have the same resources with the same addresses. This is dangerous, as using
928     this driver can hide issues where, in an asymmetric environment, nova fails to
929     update entities according to the host specific addresses (e.g. the pci_slot of
930     the neutron port bindings).
932     The current functional test environment, which is not virt driver specific,
933     has many shortcomings that make it really hard to simulate host specific virt drivers.
935 1) The virt driver is instantiated by the service logic from the name of
936 the driver class. This makes passing input to the driver instance from the
937 test at init time pretty impossible. This could be solved with some
938 fixtures around nova.virt.driver.load_compute_driver()
940     2) The compute service accesses the hypervisor not only via the virt
941     interface but also by reading the sysfs of the host. So simply providing a fake
942     virt driver instance is not enough to isolate simulated compute services
943     that are running on the same host. Also, these low level sysfs reads do not
944     carry host specific information in the call params, so simply mocking the
945     low level call does not give a way to provide host specific return values.
947 3) CONF is global, and it is read dynamically by the driver. So
948 providing host specific CONF to driver instances without race conditions
949     between the drivers is extremely hard, especially if periodic tasks are
950 enabled.
952 The libvirt based functional test env under nova.tests.functional.libvirt
953 has better support to create asymmetric environments. So please consider
954 using that if possible instead.
955 """
957 PCI_ADDR_PF1 = '0000:01:00.0'
958 PCI_ADDR_PF1_VF1 = '0000:01:00.1'
959 PCI_ADDR_PF2 = '0000:02:00.0'
960 PCI_ADDR_PF2_VF1 = '0000:02:00.1'
961 PCI_ADDR_PF3 = '0000:03:00.0'
962 PCI_ADDR_PF3_VF1 = '0000:03:00.1'
964 # NOTE(gibi): Always use this fixture along with the
965 # FakeDriverWithPciResources to make the necessary configuration for the
966 # driver.
967 class FakeDriverWithPciResourcesConfigFixture(fixtures.Fixture):
968 def setUp(self):
969 super(FakeDriverWithPciResources.
970 FakeDriverWithPciResourcesConfigFixture, self).setUp()
971 # Set device_spec before the compute node starts to match
972 # with the PCI devices reported by this fake driver.
974 # NOTE(gibi): 0000:01:00 is tagged to physnet1 and therefore not a
975 # match based on physnet to our sriov port
976 # 'port_with_sriov_resource_request' as the network of that port
977 # points to physnet2 with the attribute
978 # 'provider:physical_network'. Nova pci handling already enforces
979 # this rule.
980 #
981 # 0000:02:00 and 0000:03:00 are both tagged to physnet2 and
982 # therefore a good match for our sriov port based on physnet.
983 # Having two PFs on the same physnet will allow us to test the
984 # placement allocation - physical allocation matching based on the
985 # bandwidth allocation in the future.
986 CONF.set_override('device_spec', override=[
987 jsonutils.dumps(
988 {
989 "address": {
990 "domain": "0000",
991 "bus": "01",
992 "slot": "00",
993 "function": ".*"},
994 "physical_network": "physnet1",
995 }
996 ),
997 jsonutils.dumps(
998 {
999 "address": {
1000 "domain": "0000",
1001 "bus": "02",
1002 "slot": "00",
1003 "function": ".*"},
1004 "physical_network": "physnet2",
1005 }
1006 ),
1007 jsonutils.dumps(
1008 {
1009 "address": {
1010 "domain": "0000",
1011 "bus": "03",
1012 "slot": "00",
1013 "function": ".*"},
1014 "physical_network": "physnet2",
1015 }
1016 ),
1017 ],
1018 group='pci')
1020 # These mocks should be removed after bug
1021 # https://bugs.launchpad.net/nova/+bug/1961587 has been fixed and
1022             # all SRIOV device related information is transferred through the
1023             # virt driver and the PciDevice object instead of being queried with
1024 # sysfs calls by the network.neutron.API code.
1025 self.useFixture(fixtures.MockPatch(
1026 'nova.pci.utils.get_mac_by_pci_address',
1027 return_value='52:54:00:1e:59:c6'))
1029 self.useFixture(fixtures.MockPatch(
1030 'nova.pci.utils.get_vf_num_by_pci_address',
1031 return_value=1))
1033 def get_available_resource(self, nodename):
1034 host_status = super(
1035 FakeDriverWithPciResources, self).get_available_resource(nodename)
1036 # 01:00.0 - PF - ens1
1037 # |---- 01:00.1 - VF
1038 #
1039 # 02:00.0 - PF - ens2
1040 # |---- 02:00.1 - VF
1041 #
1042 # 03:00.0 - PF - ens3
1043 # |---- 03:00.1 - VF
1044 host_status['pci_passthrough_devices'] = jsonutils.dumps([
1045 {
1046 'address': self.PCI_ADDR_PF1,
1047 'product_id': 'fake-product_id',
1048 'vendor_id': 'fake-vendor_id',
1049 'status': 'available',
1050 'dev_type': 'type-PF',
1051 'parent_addr': None,
1052 'numa_node': 0,
1053 'label': 'fake-label',
1054 },
1055 {
1056 'address': self.PCI_ADDR_PF1_VF1,
1057 'product_id': 'fake-product_id',
1058 'vendor_id': 'fake-vendor_id',
1059 'status': 'available',
1060 'dev_type': 'type-VF',
1061 'parent_addr': self.PCI_ADDR_PF1,
1062 'numa_node': 0,
1063 'label': 'fake-label',
1064 "parent_ifname": self._host + "-ens1",
1065 },
1066 {
1067 'address': self.PCI_ADDR_PF2,
1068 'product_id': 'fake-product_id',
1069 'vendor_id': 'fake-vendor_id',
1070 'status': 'available',
1071 'dev_type': 'type-PF',
1072 'parent_addr': None,
1073 'numa_node': 0,
1074 'label': 'fake-label',
1075 },
1076 {
1077 'address': self.PCI_ADDR_PF2_VF1,
1078 'product_id': 'fake-product_id',
1079 'vendor_id': 'fake-vendor_id',
1080 'status': 'available',
1081 'dev_type': 'type-VF',
1082 'parent_addr': self.PCI_ADDR_PF2,
1083 'numa_node': 0,
1084 'label': 'fake-label',
1085 "parent_ifname": self._host + "-ens2",
1086 },
1087 {
1088 'address': self.PCI_ADDR_PF3,
1089 'product_id': 'fake-product_id',
1090 'vendor_id': 'fake-vendor_id',
1091 'status': 'available',
1092 'dev_type': 'type-PF',
1093 'parent_addr': None,
1094 'numa_node': 0,
1095 'label': 'fake-label',
1096 },
1097 {
1098 'address': self.PCI_ADDR_PF3_VF1,
1099 'product_id': 'fake-product_id',
1100 'vendor_id': 'fake-vendor_id',
1101 'status': 'available',
1102 'dev_type': 'type-VF',
1103 'parent_addr': self.PCI_ADDR_PF3,
1104 'numa_node': 0,
1105 'label': 'fake-label',
1106 "parent_ifname": self._host + "-ens3",
1107 },
1108 ])
1109 return host_status
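# NOTE: illustrative sketch, not part of the original module. As the
# NOTE(gibi) comment inside the class explains, the device_spec fixture must
# be applied together with this driver; ``test_case`` is assumed to be a
# nova functional test with the usual useFixture()/flags() helpers.
def _example_enable_pci_fake_driver(test_case):
    test_case.useFixture(
        FakeDriverWithPciResources.FakeDriverWithPciResourcesConfigFixture())
    test_case.flags(compute_driver='fake.FakeDriverWithPciResources')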
1112class FakeLiveMigrateDriverWithPciResources(
1113 FakeLiveMigrateDriver, FakeDriverWithPciResources):
1114 """FakeDriver derivative to handle force_complete and abort calls.
1116     This driver serves those tests that need to abort or force-complete
1117     a live migration: the live migration will never finish
1118     without the force_complete_migration or delete_migration API calls.
1120 """
1123class FakeDriverWithCaching(FakeDriver):
1124 def __init__(self, *a, **k):
1125 super(FakeDriverWithCaching, self).__init__(*a, **k)
1126 self.cached_images = set()
1128 def cache_image(self, context, image_id):
1129 if image_id in self.cached_images:
1130 return False
1131 else:
1132 self.cached_images.add(image_id)
1133 return True
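# NOTE: illustrative sketch, not part of the original module. cache_image()
# above reports True only the first time a given image id is seen, mimicking
# an image cache pre-fetch; the image id is an arbitrary example value.
def _example_image_caching():
    caching_driver = FakeDriverWithCaching(FakeVirtAPI())
    assert caching_driver.cache_image(None, 'image-1') is True
    assert caching_driver.cache_image(None, 'image-1') is False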
1136class EphEncryptionDriver(MediumFakeDriver):
1137 capabilities = dict(
1138 FakeDriver.capabilities,
1139 supports_ephemeral_encryption=True)
1142class EphEncryptionDriverLUKS(MediumFakeDriver):
1143 capabilities = dict(
1144 FakeDriver.capabilities,
1145 supports_ephemeral_encryption=True,
1146 supports_ephemeral_encryption_luks=True)
1149class EphEncryptionDriverPLAIN(MediumFakeDriver):
1150 capabilities = dict(
1151 FakeDriver.capabilities,
1152 supports_ephemeral_encryption=True,
1153 supports_ephemeral_encryption_plain=True)
1156class FakeDriverWithoutFakeNodes(FakeDriver):
1157 """FakeDriver that behaves like a real single-node driver.
1159 This behaves like a real virt driver from the perspective of its
1160 nodes, with a stable nodename and use of the global node identity
1161 stuff to provide a stable node UUID.
1162 """
1164 def get_available_resource(self, nodename):
1165 resources = super().get_available_resource(nodename)
1166 resources['uuid'] = nova.virt.node.get_local_node_uuid()
1167 return resources
1169 def get_nodenames_by_uuid(self, refresh=False):
1170 return {
1171 nova.virt.node.get_local_node_uuid(): self.get_available_nodes()[0]
1172 }