# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A connection to the VMware vCenter platform.
"""

import os
import re

import os_resource_classes as orc
import os_traits as ot
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_utils import versionutils as v_utils
from oslo_vmware import exceptions as vexc
from oslo_vmware import pbm
from oslo_vmware import vim_util

from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import objects
import nova.privsep.path
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vim_util as nova_vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops

LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF

TIME_BETWEEN_API_CALL_RETRIES = 1.0
MAX_CONSOLE_BYTES = 100 * units.Ki


class VMwareVCDriver(driver.ComputeDriver):
    """The VC host connection object."""

    capabilities = {
        "has_imagecache": True,
        "supports_evacuate": False,
        "supports_migrate_to_same_host": True,
        "supports_attach_interface": True,
        "supports_multiattach": False,
        "supports_trusted_certs": False,
        "supports_pcpus": False,
        "supports_accelerators": False,
        "supports_remote_managed_ports": False,
        "supports_address_space_passthrough": False,
        "supports_address_space_emulated": False,
        "supports_stateless_firmware": False,
        "supports_virtio_fs": False,
        "supports_mem_backing_file": False,

        # Image type support flags
        "supports_image_type_aki": False,
        "supports_image_type_ami": False,
        "supports_image_type_ari": False,
        "supports_image_type_iso": True,
        "supports_image_type_qcow2": False,
        "supports_image_type_raw": False,
        "supports_image_type_vdi": False,
        "supports_image_type_vhd": False,
        "supports_image_type_vhdx": False,
        "supports_image_type_vmdk": True,
        "supports_image_type_ploop": False,
    }

    # The vCenter driver includes API that acts on ESX hosts or groups
    # of ESX hosts in clusters or non-cluster logical-groupings.
    #
    # vCenter is not a hypervisor itself, it works with multiple
    # hypervisor host machines and their guests. This fact can
    # subtly alter how vSphere and OpenStack interoperate.
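
    # A hedged configuration sketch (not part of the original file): these
    # are the [vmware] options that __init__ below reads; the values shown
    # are hypothetical placeholders, not defaults.
    #
    #     [vmware]
    #     host_ip = 198.51.100.10
    #     host_username = administrator@vsphere.local
    #     host_password = secret
    #     cluster_name = cluster1
    #     datastore_regex = nova-.*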

    def __init__(self, virtapi, scheme="https"):
        super(VMwareVCDriver, self).__init__(virtapi)

        if (CONF.vmware.host_ip is None or
                CONF.vmware.host_username is None or
                CONF.vmware.host_password is None):
            raise Exception(_("Must specify host_ip, host_username and "
                              "host_password to use vmwareapi.VMwareVCDriver"))

        self._datastore_regex = None
        if CONF.vmware.datastore_regex:
            try:
                self._datastore_regex = re.compile(CONF.vmware.datastore_regex)
            except re.error:
                raise exception.InvalidInput(
                    reason=_("Invalid Regular Expression %s")
                    % CONF.vmware.datastore_regex)

        self._session = session.VMwareAPISession(scheme=scheme)

        self._check_min_version()

        # Update the PBM location if necessary
        if CONF.vmware.pbm_enabled:
            self._update_pbm_location()

        self._validate_configuration()
        self._cluster_name = CONF.vmware.cluster_name
        self._cluster_ref = vm_util.get_cluster_ref_by_name(self._session,
                                                            self._cluster_name)
        if self._cluster_ref is None:
            raise exception.NotFound(_("The specified cluster '%s' was not "
                                       "found in vCenter")
                                     % self._cluster_name)
        self._vcenter_uuid = self._get_vcenter_uuid()
        self._nodename = \
            self._create_nodename(vim_util.get_moref_value(self._cluster_ref))
        self._volumeops = volumeops.VMwareVolumeOps(self._session,
                                                    self._cluster_ref)
        self._vmops = vmops.VMwareVMOps(self._session,
                                        virtapi,
                                        self._volumeops,
                                        self._cluster_ref,
                                        datastore_regex=self._datastore_regex)
        self._vc_state = host.VCState(self._session,
                                      self._nodename,
                                      self._cluster_ref,
                                      self._datastore_regex)

        # Register the OpenStack extension
        self._register_openstack_extension()

    def _check_min_version(self):
        min_version = v_utils.convert_version_to_int(constants.MIN_VC_VERSION)
        next_min_ver = v_utils.convert_version_to_int(
            constants.NEXT_MIN_VC_VERSION)
        vc_version = vim_util.get_vc_version(self._session)
        LOG.info("VMware vCenter version: %s", vc_version)
        if v_utils.convert_version_to_int(vc_version) < min_version:
            raise exception.NovaException(
                _('Detected vCenter version %(version)s. Nova requires VMware '
                  'vCenter version %(min_version)s or greater.') % {
                      'version': vc_version,
                      'min_version': constants.MIN_VC_VERSION})
        elif v_utils.convert_version_to_int(vc_version) < next_min_ver:
            LOG.warning('Running Nova with a VMware vCenter version less '
                        'than %(version)s is deprecated. The required '
                        'minimum version of vCenter will be raised to '
                        '%(version)s in the 16.0.0 release.',
                        {'version': constants.NEXT_MIN_VC_VERSION})
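
    # Illustrative note (not part of the driver): oslo.utils packs a dotted
    # version string into a single integer, a factor of 1000 per component,
    # so the plain integer comparisons above order versions correctly:
    #
    #     >>> v_utils.convert_version_to_int('5.1.0')
    #     5001000
    #     >>> (v_utils.convert_version_to_int('5.1.0') <
    #     ...  v_utils.convert_version_to_int('6.0.0'))
    #     True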

    def _update_pbm_location(self):
        if CONF.vmware.pbm_wsdl_location:
            pbm_wsdl_loc = CONF.vmware.pbm_wsdl_location
        else:
            version = vim_util.get_vc_version(self._session)
            pbm_wsdl_loc = pbm.get_pbm_wsdl_location(version)
        self._session.pbm_wsdl_loc_set(pbm_wsdl_loc)

    def _validate_configuration(self):
        if CONF.vmware.pbm_enabled:
            if not CONF.vmware.pbm_default_policy:
                raise error_util.PbmDefaultPolicyUnspecified()
            if not pbm.get_profile_id_by_name(
                    self._session,
                    CONF.vmware.pbm_default_policy):
                raise error_util.PbmDefaultPolicyDoesNotExist()
            if CONF.vmware.datastore_regex:
                LOG.warning("datastore_regex is ignored when PBM is enabled")
                self._datastore_regex = None
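
    # A hedged sketch of a storage-policy (PBM) configuration, assuming a
    # policy with the name below exists in vCenter; the values are
    # hypothetical. Note that datastore_regex is ignored (and reset to None
    # above) once PBM is enabled:
    #
    #     [vmware]
    #     pbm_enabled = True
    #     pbm_default_policy = openstack-default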

    def init_host(self, host):
        LOG.warning(
            'The vmwareapi driver is not tested by the OpenStack project nor '
            'does it have clear maintainer(s) and thus its quality can not be '
            'ensured. It should be considered experimental and may be removed '
            'in a future release. If you are using the driver in production '
            'please let us know via the openstack-discuss mailing list.'
        )

        vim = self._session.vim
        if vim is None:
            self._session._create_session()

    def cleanup_host(self, host):
        self._session.logout()

    def _register_openstack_extension(self):
        # Register an 'OpenStack' extension in vCenter
        os_extension = self._session._call_method(vim_util, 'find_extension',
                                                  constants.EXTENSION_KEY)
        if os_extension is None:
            try:
                self._session._call_method(vim_util, 'register_extension',
                                           constants.EXTENSION_KEY,
                                           constants.EXTENSION_TYPE_INSTANCE)
                LOG.info('Registered extension %s with vCenter',
                         constants.EXTENSION_KEY)
            except vexc.VimFaultException as e:
                with excutils.save_and_reraise_exception() as ctx:
                    if 'InvalidArgument' in e.fault_list:
                        LOG.debug('Extension %s already exists.',
                                  constants.EXTENSION_KEY)
                        ctx.reraise = False
        else:
            LOG.debug('Extension %s already exists.', constants.EXTENSION_KEY)

    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True,
                destroy_secrets=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        pass

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  share_info, block_device_info=None):
238 """resume guest state when a host is booted."""
        # Check if the instance is running already and avoid doing
        # anything if it is.
        state = vm_util.get_vm_state(self._session, instance)
        ignored_states = [power_state.RUNNING, power_state.SUSPENDED]
        if state in ignored_states:
            return
        # Instance is not up and could be in an unknown state.
        # Be as absolute as possible about getting it back into
        # a known and running state.
        self.reboot(context, instance, network_info, 'hard',
                    block_device_info)

    def list_instance_uuids(self):
        """List VM instance UUIDs."""
        return self._vmops.list_instances()

    def list_instances(self):
        """List VM instances from the single compute node."""
        return self._vmops.list_instances()

    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Transfers the disk of a running instance in multiple phases,
        turning off the instance before the end.
        """
        # TODO(PhilDay): Add support for timeout (clean shutdown)
        return self._vmops.migrate_disk_and_power_off(context, instance,
                                                      dest, flavor)

    def confirm_migration(self, context, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        self._vmops.confirm_migration(migration, instance, network_info)

    def finish_revert_migration(self, context, instance, network_info,
                                migration, block_device_info=None,
                                power_on=True):
        """Finish reverting a resize, powering back on the instance."""
        self._vmops.finish_revert_migration(context, instance, network_info,
                                            block_device_info, power_on)

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         allocations, block_device_info=None, power_on=True):
        """Completes a resize, turning on the migrated instance."""
        self._vmops.finish_migration(context, migration, instance, disk_info,
                                     network_info, image_meta,
                                     resize_instance, block_device_info,
                                     power_on)

    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data):
        return migrate_data

    def post_live_migration_at_source(self, context, instance, network_info):
        pass

    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        pass

    def cleanup_live_migration_destination_check(self, context,
                                                 dest_check_data):
        pass

    def live_migration(self, context, instance, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Live migration of an instance to another host."""
        self._vmops.live_migration(context, instance, dest, post_method,
                                   recover_method, block_migration,
                                   migrate_data)

    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data,
                                      block_device_info=None):
        cluster_name = dest_check_data.cluster_name
        cluster_ref = vm_util.get_cluster_ref_by_name(self._session,
                                                      cluster_name)
        if cluster_ref is None:
            msg = (_("Cannot find destination cluster %s for live migration")
                   % cluster_name)
            raise exception.MigrationPreCheckError(reason=msg)
        res_pool_ref = vm_util.get_res_pool_ref(self._session, cluster_ref)
        if res_pool_ref is None:
            msg = _("Cannot find destination resource pool for live "
                    "migration")
            raise exception.MigrationPreCheckError(reason=msg)
        return dest_check_data

    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info,
                                           dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        # the information that we need for the destination compute node
        # is the name of its cluster and datastore regex
        data = objects.VMwareLiveMigrateData()
        data.cluster_name = CONF.vmware.cluster_name
        data.datastore_regex = CONF.vmware.datastore_regex
        return data

    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Clean up destination node after a failed live migration."""
        self.destroy(context, instance, network_info, block_device_info)

    def get_instance_disk_info(self, instance, block_device_info=None):
        pass

    def get_vnc_console(self, context, instance):
        """Return link to instance's VNC console using vCenter logic."""
        # vCenter does not actually run the VNC service
        # itself. You must talk to the VNC host underneath vCenter.
        return self._vmops.get_vnc_console(instance)

    def get_mks_console(self, context, instance):
        return self._vmops.get_mks_console(instance)

    def get_console_output(self, context, instance):
        if not CONF.vmware.serial_log_dir:
            LOG.error("The 'serial_log_dir' config option is not set!")
            return
        fname = instance.uuid.replace('-', '')
        path = os.path.join(CONF.vmware.serial_log_dir, fname)
        if not os.path.exists(path):
            LOG.warning('The console log is missing. Check your VSPC '
                        'configuration', instance=instance)
            return b""
        read_log_data, remaining = nova.privsep.path.last_bytes(
            path, MAX_CONSOLE_BYTES)
        return read_log_data
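
    # Note on the method above: nova.privsep.path.last_bytes returns a
    # (data, remaining) tuple, where `remaining` is the count of unread
    # bytes at the front of the file. Only the tail of the serial log, at
    # most MAX_CONSOLE_BYTES (100 KiB), is returned to the API; `remaining`
    # is deliberately discarded.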

    def _get_vcenter_uuid(self):
        """Retrieves the vCenter UUID."""

        about = self._session._call_method(nova_vim_util, 'get_about_info')
        return about.instanceUuid

    def _create_nodename(self, mo_id):
        """Return a nodename which uniquely describes a cluster.

        The name will be of the form:
          <mo id>.<vcenter uuid>
        e.g.
          domain-26.9d51f082-58a4-4449-beed-6fd205a5726b
        """

        return '%s.%s' % (mo_id, self._vcenter_uuid)

    def _get_available_resources(self, host_stats):
        return {'vcpus': host_stats['vcpus'],
                'memory_mb': host_stats['host_memory_total'],
                'local_gb': host_stats['disk_total'],
                'vcpus_used': 0,
                'memory_mb_used': host_stats['host_memory_total'] -
                                  host_stats['host_memory_free'],
                'local_gb_used': host_stats['disk_used'],
                'hypervisor_type': host_stats['hypervisor_type'],
                'hypervisor_version': host_stats['hypervisor_version'],
                'hypervisor_hostname': host_stats['hypervisor_hostname'],
                # The VMware driver manages multiple hosts, so there are
                # likely many different CPU models in use. As such it is
                # impossible to provide any meaningful info on the CPU
                # model of the "host".
                'cpu_info': None,
                'supported_instances': host_stats['supported_instances'],
                'numa_topology': None,
                }

    def get_available_resource(self, nodename):
        """Retrieve resource info.

        This method is called when nova-compute launches, and
        as part of a periodic task.

        :returns: dictionary describing resources

        """
        host_stats = self._vc_state.get_host_stats(refresh=True)
        stats_dict = self._get_available_resources(host_stats)
        return stats_dict

    def get_available_nodes(self, refresh=False):
        """Returns nodenames of all nodes managed by the compute service.

        This driver supports only one compute node.
        """
        return [self._nodename]

    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        """Update a ProviderTree object with current resource provider,
        inventory information and CPU traits.

        :param nova.compute.provider_tree.ProviderTree provider_tree:
            A nova.compute.provider_tree.ProviderTree object representing all
            the providers in the tree associated with the compute node, and
            any sharing providers (those with the
            ``MISC_SHARES_VIA_AGGREGATE`` trait) associated via aggregate
            with any of those providers (but not *their* tree- or
            aggregate-associated providers), as currently known by placement.
        :param nodename:
            String name of the compute node (i.e.
            ComputeNode.hypervisor_hostname) for which the caller is
            requesting updated provider information.
        :param allocations:
            Dict of allocation data of the form:
              { $CONSUMER_UUID: {
                    # The shape of each "allocations" dict below is identical
                    # to the return from GET /allocations/{consumer_uuid}
                    "allocations": {
                        $RP_UUID: {
                            "generation": $RP_GEN,
                            "resources": {
                                $RESOURCE_CLASS: $AMOUNT,
                                ...
                            },
                        },
                        ...
                    },
                    "project_id": $PROJ_ID,
                    "user_id": $USER_ID,
                    "consumer_generation": $CONSUMER_GEN,
                },
                ...
              }
            If None, and the method determines that any inventory needs to be
            moved (from one provider to another and/or to a different
            resource class), the ReshapeNeeded exception must be raised.
            Otherwise, this dict must be edited in place to indicate the
            desired final state of allocations.
        :raises ReshapeNeeded: If allocations is None and any inventory needs
            to be moved from one provider to another and/or to a different
            resource class. At this time the VMware driver does not reshape.
        :raises: ReshapeFailed if the requested tree reshape fails for
            whatever reason.
        """
        # NOTE(cdent): This is a side-effecty method, we are changing the
        # provider tree in place (on purpose).
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        stats = vm_util.get_stats_from_cluster(self._session,
                                               self._cluster_ref)
        datastores = ds_util.get_available_datastores(self._session,
                                                      self._cluster_ref,
                                                      self._datastore_regex)
        total_disk_capacity = sum([ds.capacity for ds in datastores])
        max_free_space = max([ds.freespace for ds in datastores])
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result = {
            orc.VCPU: {
                'total': stats['cpu']['vcpus'],
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': stats['cpu']['max_vcpus_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.VCPU],
            },
            orc.MEMORY_MB: {
                'total': stats['mem']['total'],
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': stats['mem']['max_mem_mb_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.MEMORY_MB],
            },
        }

        # If a sharing DISK_GB provider exists in the provider tree, then our
        # storage is shared, and we should not report the DISK_GB inventory
        # in the compute node provider.
        # TODO(cdent): We don't do this yet, in part because of the issues
        # in bug #1784020, but also because we can represent all datastores
        # as shared providers and should do once update_provider_tree is
        # working well.
        if provider_tree.has_sharing_provider(orc.DISK_GB):
            LOG.debug('Ignoring sharing provider - see bug #1784020')
        result[orc.DISK_GB] = {
            'total': total_disk_capacity // units.Gi,
            'reserved': reserved_disk_gb,
            'min_unit': 1,
            'max_unit': max_free_space // units.Gi,
            'step_size': 1,
            'allocation_ratio': ratios[orc.DISK_GB],
        }

        provider_tree.update_inventory(nodename, result)

        # TODO(cdent): Here is where additional functionality would be added.
        # In the libvirt driver this is where nested GPUs are reported and
        # where cpu traits are added. In the vmware world, this is where we
        # would add nested providers representing tenant VDC and similar.

        # nova with vmware only supports HW_ARCH_X86_64
        cpu_arch_trait = 'HW_ARCH_X86_64'
        if cpu_arch_trait in ot.get_traits('HW_ARCH_'):
            provider_tree.add_traits(nodename, cpu_arch_trait)
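
    # Worked example of the disk arithmetic above (hypothetical numbers):
    # compute_utils.convert_mb_to_ceil_gb rounds up, so a
    # reserved_host_disk_mb of 1536 reserves 2 GB, and the DISK_GB totals
    # are integer-divided by units.Gi, so a 500107862016-byte datastore
    # reports 500107862016 // units.Gi == 465 GiB of total capacity.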

    def prepare_for_spawn(self, instance):
        """Perform pre-checks for spawn."""
        self._vmops.prepare_for_spawn(instance)

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None, power_on=True, accel_info=None):
        """Create VM instance."""
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)

    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach volume storage to VM instance."""
        return self._volumeops.attach_volume(connection_info, instance)

    def detach_volume(self, context, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach volume storage from VM instance."""
        # NOTE(claudiub): if context parameter is to be used in the future,
        # the _detach_instance_volumes method will have to be updated as
        # well.
        return self._volumeops.detach_volume(connection_info, instance)

    def get_volume_connector(self, instance):
        """Return volume connector information."""
        return self._volumeops.get_volume_connector(instance)

    def get_host_ip_addr(self):
        """Returns the IP address of the vCenter host."""
        return CONF.vmware.host_ip

    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance."""
        self._vmops.snapshot(context, instance, image_id, update_task_state)

    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None,
               accel_info=None, share_info=None):
        """Reboot VM instance."""
        self._vmops.reboot(instance, network_info, reboot_type)

    def _detach_instance_volumes(self, instance, block_device_info):
        # We need to detach attached volumes
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        if block_device_mapping:
            # Certain disk types, for example 'IDE' do not support hot
            # plugging. Hence we need to power off the instance and update
            # the instance state.
            self._vmops.power_off(instance)
            for disk in block_device_mapping:
                connection_info = disk['connection_info']
                try:
                    # NOTE(claudiub): Passing None as the context, as it is
                    # not currently used.
                    self.detach_volume(None, connection_info, instance,
                                       disk.get('device_name'))
                except exception.DiskNotFound:
                    LOG.warning('The volume %s does not exist!',
                                disk.get('device_name'),
                                instance=instance)
                except Exception as e:
                    with excutils.save_and_reraise_exception():
                        LOG.error("Failed to detach %(device_name)s. "
                                  "Exception: %(exc)s",
                                  {'device_name': disk.get('device_name'),
                                   'exc': e},
                                  instance=instance)

    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, destroy_secrets=True):
        """Destroy VM instance."""

        # Destroy gets triggered when Resource Claim in resource_tracker
        # is not successful. When resource claim is not successful,
        # node is not set in instance. Perform destroy only if node is set
        if not instance.node:
            return

        # A resize uses the same instance on the VC. We do not delete that
        # VM in the event of a revert
        if instance.task_state == task_states.RESIZE_REVERTING:
            return

        # We need to detach attached volumes
        if block_device_info is not None:
            try:
                self._detach_instance_volumes(instance, block_device_info)
            except (vexc.ManagedObjectNotFoundException,
                    exception.InstanceNotFound):
                LOG.warning('Instance does not exist. Proceeding to '
                            'delete instance properties on datastore',
                            instance=instance)
        self._vmops.destroy(instance, destroy_disks)

    def pause(self, instance):
        """Pause VM instance."""
        self._vmops.pause(instance)

    def unpause(self, instance):
        """Unpause paused VM instance."""
        self._vmops.unpause(instance)

    def suspend(self, context, instance):
        """Suspend the specified instance."""
        self._vmops.suspend(instance)

    def resume(
        self,
        context,
        instance,
        network_info,
        block_device_info=None,
        share_info=None
    ):
        """Resume the suspended VM instance."""
        self._vmops.resume(instance)

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password, block_device_info, share_info):
        """Rescue the specified instance."""
        self._vmops.rescue(context, instance, network_info, image_meta)

    def unrescue(
        self,
        context: nova_context.RequestContext,
        instance: 'objects.Instance',
    ):
        """Unrescue the specified instance."""
        self._vmops.unrescue(instance)

    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance."""
        self._vmops.power_off(instance, timeout, retry_interval)

    def power_on(self, context, instance, network_info,
                 block_device_info=None, accel_info=None, share_info=None):
        """Power on the specified instance."""
        self._vmops.power_on(instance)

    def poll_rebooting_instances(self, timeout, instances):
        """Poll for rebooting instances."""
        self._vmops.poll_rebooting_instances(timeout, instances)

    def get_info(self, instance, use_cache=True):
        """Return info about the VM instance."""
        return self._vmops.get_info(instance)

    def get_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        return self._vmops.get_diagnostics(instance)

    def get_instance_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        return self._vmops.get_instance_diagnostics(instance)

    def host_power_action(self, action):
        """Host operations not supported by VC driver.

        This needs to override the ESX driver implementation.
        """
        raise NotImplementedError()

    def host_maintenance_mode(self, host, mode):
        """Host operations not supported by VC driver.

        This needs to override the ESX driver implementation.
        """
        raise NotImplementedError()

    def set_host_enabled(self, enabled):
        """Host operations not supported by VC driver.

        This needs to override the ESX driver implementation.
        """
        raise NotImplementedError()

    def get_host_uptime(self):
        """Host uptime operation not supported by VC driver."""

        msg = _("Multiple hosts may be managed by the VMware "
                "vCenter driver; therefore we do not return "
                "uptime for just one host.")
        raise NotImplementedError(msg)

    def inject_network_info(self, instance, nw_info):
        """Inject network info for the specified instance."""
        self._vmops.inject_network_info(instance, nw_info)

    def manage_image_cache(self, context, all_instances):
        """Manage the local cache of images."""
        self._vmops.manage_image_cache(context, all_instances)

    def instance_exists(self, instance):
        """Efficient override of base instance_exists method."""
        return self._vmops.instance_exists(instance)

    def attach_interface(self, context, instance, image_meta, vif):
        """Attach an interface to the instance."""
        self._vmops.attach_interface(context, instance, image_meta, vif)

    def detach_interface(self, context, instance, vif):
        """Detach an interface from the instance."""
        self._vmops.detach_interface(context, instance, vif)