Coverage for nova/virt/vmwareapi/vmops.py: 0%
1045 statements
coverage.py v7.6.12, created at 2025-04-24 11:16 +0000

# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Class for VM tasks like spawn, snapshot, suspend, resume etc.
"""

import collections
import os
import re
import time

import decorator
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import vim_util as vutil

from nova.api.metadata import base as instance_metadata
from nova.compute import api as compute
from nova.compute import power_state
from nova.compute import task_states
import nova.conf
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova import objects
from nova.objects import fields
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import hardware
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util


CONF = nova.conf.CONF

LOG = logging.getLogger(__name__)

RESIZE_TOTAL_STEPS = 6
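# NOTE: the resize workflow below reports six steps: power off the
# instance, reconfigure the VM, resize the disk, remove ephemerals and
# swap, recreate ephemerals and swap, and power the VM back on.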


class VirtualMachineInstanceConfigInfo(object):
    """Parameters needed to create and configure a new instance."""

    def __init__(self, instance, image_info, datastore, dc_info, image_cache,
                 extra_specs=None):

        # Some methods called during spawn take the instance parameter purely
        # for logging purposes.
        # TODO(vui) Clean them up, so we no longer need to keep this variable
        self.instance = instance

        self.ii = image_info
        self.root_gb = instance.flavor.root_gb
        self.datastore = datastore
        self.dc_info = dc_info
        self._image_cache = image_cache
        self._extra_specs = extra_specs

    @property
    def cache_image_folder(self):
        if self.ii.image_id is None:
            return
        return self._image_cache.get_image_cache_folder(
            self.datastore, self.ii.image_id)

    @property
    def cache_image_path(self):
        if self.ii.image_id is None:
            return
        cached_image_file_name = "%s.%s" % (self.ii.image_id,
                                            self.ii.file_type)
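        # e.g. an image id of 'abc' with file_type 'vmdk' resolves to
        # '<cache image folder>/abc.vmdk' on the datastore.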
        return self.cache_image_folder.join(cached_image_file_name)


# Note(vui): See https://bugs.launchpad.net/nova/+bug/1363349
# for cases where mocking time.sleep() can have unintended effects on code
# not under test. For now, unblock the affected test cases by providing
# a wrapper function to work around needing to mock time.sleep()
def _time_sleep_wrapper(delay):
    time.sleep(delay)


@decorator.decorator
def retry_if_task_in_progress(f, *args, **kwargs):
    retries = max(CONF.vmware.api_retry_count, 1)
    delay = 1
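    # NOTE: retries back off exponentially (1s, 2s, 4s, ...) with the
    # sleep between attempts capped at 60 seconds.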
    for attempt in range(1, retries + 1):
        if attempt != 1:
            _time_sleep_wrapper(delay)
            delay = min(2 * delay, 60)
        try:
            f(*args, **kwargs)
            return
        except vexc.TaskInProgress:
            pass


class VMwareVMOps(object):
    """Management class for VM-related tasks."""

    def __init__(self, session, virtapi, volumeops, cluster=None,
                 datastore_regex=None):
        """Initializer."""
        self.compute_api = compute.API()
        self._session = session
        self._virtapi = virtapi
        self._volumeops = volumeops
        self._cluster = cluster
        self._root_resource_pool = vm_util.get_res_pool_ref(self._session,
                                                            self._cluster)
        self._datastore_regex = datastore_regex
        self._base_folder = self._get_base_folder()
        self._tmp_folder = 'vmware_temp'
        self._datastore_browser_mapping = {}
        self._imagecache = imagecache.ImageCacheManager(self._session,
                                                        self._base_folder)
        self._network_api = neutron.API()

    def _get_base_folder(self):
        # Enable more than one compute node to run on the same host
        if CONF.vmware.cache_prefix:
            base_folder = '%s%s' % (CONF.vmware.cache_prefix,
                                    CONF.image_cache.subdirectory_name)
        # Ensure that the base folder is unique per compute node
        elif CONF.image_cache.remove_unused_base_images:
            base_folder = '%s%s' % (CONF.my_ip,
                                    CONF.image_cache.subdirectory_name)
        else:
            # Disabled aging ensures backward compatibility
            base_folder = CONF.image_cache.subdirectory_name
        return base_folder

    def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
        service_content = self._session.vim.service_content
        LOG.debug("Extending root virtual disk to %s", requested_size,
                  instance=instance)
        vmdk_extend_task = self._session._call_method(
            self._session.vim,
            "ExtendVirtualDisk_Task",
            service_content.virtualDiskManager,
            name=name,
            datacenter=dc_ref,
            newCapacityKb=requested_size,
            eagerZero=False)
        try:
            self._session._wait_for_task(vmdk_extend_task)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Extending virtual disk failed with error: %s',
                          e, instance=instance)
                # Clean up files created during the extend operation
                files = [name.replace(".vmdk", "-flat.vmdk"), name]
                for file in files:
                    ds_path = ds_obj.DatastorePath.parse(file)
                    self._delete_datastore_file(ds_path, dc_ref)

        LOG.debug("Extended root virtual disk", instance=instance)

    def _delete_datastore_file(self, datastore_path, dc_ref):
        try:
            ds_util.file_delete(self._session, datastore_path, dc_ref)
        except (vexc.CannotDeleteFileException,
                vexc.FileFaultException,
                vexc.FileLockedException,
                vexc.FileNotFoundException):
            LOG.debug("Unable to delete %(ds)s. There may be more than "
                      "one process or thread trying to delete the file",
                      {'ds': datastore_path},
                      exc_info=True)

    def _extend_if_required(self, dc_info, image_info, instance,
                            root_vmdk_path):
        """Increase the size of the root vmdk if necessary."""
        if instance.flavor.root_gb * units.Gi > image_info.file_size:
            size_in_kb = instance.flavor.root_gb * units.Mi
            self._extend_virtual_disk(instance, size_in_kb,
                                      root_vmdk_path, dc_info.ref)

    def _configure_config_drive(self, context, instance, vm_ref, dc_info,
                                datastore, injected_files, admin_password,
                                network_info):
        session_vim = self._session.vim
        cookies = session_vim.client.cookiejar
        dc_path = vutil.get_inventory_path(session_vim, dc_info.ref)
        uploaded_iso_path = self._create_config_drive(context,
                                                      instance,
                                                      injected_files,
                                                      admin_password,
                                                      network_info,
                                                      datastore.name,
                                                      dc_path,
                                                      instance.uuid,
                                                      cookies)
        uploaded_iso_path = datastore.build_path(uploaded_iso_path)
        self._attach_cdrom_to_vm(
            vm_ref, instance,
            datastore.ref,
            str(uploaded_iso_path))

    def _get_instance_metadata(self, context, instance, flavor=None):
        if not flavor:
            flavor = instance.flavor
        metadata = [('name', instance.display_name),
                    ('userid', context.user_id),
                    ('username', context.user_name),
                    ('projectid', context.project_id),
                    ('projectname', context.project_name),
                    ('flavor:name', flavor.name),
                    ('flavor:memory_mb', flavor.memory_mb),
                    ('flavor:vcpus', flavor.vcpus),
                    ('flavor:ephemeral_gb', flavor.ephemeral_gb),
                    ('flavor:root_gb', flavor.root_gb),
                    ('flavor:swap', flavor.swap),
                    ('imageid', instance.image_ref),
                    ('package', version.version_string_with_package())]
        # NOTE: formatted as lines like this: 'name:NAME\nuserid:ID\n...'
        return ''.join(['%s:%s\n' % (k, v) for k, v in metadata])

    def _create_folders(self, parent_folder, folder_path):
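        # Creates each component of folder_path in turn under
        # parent_folder, e.g. 'OpenStack/Project (uuid)/Instances' is
        # built one folder at a time, consulting the folder-ref cache
        # before each create call.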
        folders = folder_path.split('/')
        path_list = []
        for folder in folders:
            path_list.append(folder)
            folder_path = '/'.join(path_list)
            folder_ref = vm_util.folder_ref_cache_get(folder_path)
            if not folder_ref:
                folder_ref = vm_util.create_folder(self._session,
                                                   parent_folder,
                                                   folder)
                vm_util.folder_ref_cache_update(folder_path, folder_ref)
            parent_folder = folder_ref
        return folder_ref

    def _get_folder_name(self, name, id_):
        # Maximum folder length must be less than 80 characters.
        # The 'id' length is 36. The maximum prefix for name is 40.
        # We cannot truncate the 'id' as this is unique across OpenStack.
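        # Worst case: 40 (name) + 36 (id) + 3 (' (' and ')') = 79 < 80.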
        return '%s (%s)' % (name[:40], id_[:36])

    def build_virtual_machine(self, instance, image_info,
                              dc_info, datastore, network_info, extra_specs,
                              metadata):
        vif_infos = vmwarevif.get_vif_info(self._session,
                                           self._cluster,
                                           image_info.vif_model,
                                           network_info)
        LOG.debug('Instance VIF info %s', vif_infos, instance=instance)

        if extra_specs.storage_policy:
            profile_spec = vm_util.get_storage_profile_spec(
                self._session, extra_specs.storage_policy)
        else:
            profile_spec = None
        # Get the create vm config spec
        client_factory = self._session.vim.client.factory
        config_spec = vm_util.get_vm_create_spec(client_factory,
                                                 instance,
                                                 datastore.name,
                                                 vif_infos,
                                                 extra_specs,
                                                 image_info.os_type,
                                                 profile_spec=profile_spec,
                                                 metadata=metadata)

        folder_name = self._get_folder_name('Project',
                                            instance.project_id)
        folder_path = 'OpenStack/%s/Instances' % folder_name
        folder = self._create_folders(dc_info.vmFolder, folder_path)

        # Create the VM
        vm_ref = vm_util.create_vm(self._session, instance, folder,
                                   config_spec, self._root_resource_pool)
        return vm_ref

    def _get_extra_specs(self, flavor, image_meta=None):
        image_meta = image_meta or objects.ImageMeta.from_dict({})
        extra_specs = vm_util.ExtraSpecs()
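        # Flavor keys consumed here are of the form
        # 'quota:<resource>_<key>', e.g. 'quota:cpu_limit',
        # 'quota:memory_reservation' or 'quota:disk_io_shares_level'.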
        for resource in ['cpu', 'memory', 'disk_io', 'vif']:
            for (key, type) in (('limit', int),
                                ('reservation', int),
                                ('shares_level', str),
                                ('shares_share', int)):
                value = flavor.extra_specs.get('quota:' + resource + '_' + key)
                if value:
                    setattr(getattr(extra_specs, resource + '_limits'),
                            key, type(value))
        extra_specs.cpu_limits.validate()
        extra_specs.memory_limits.validate()
        extra_specs.disk_io_limits.validate()
        extra_specs.vif_limits.validate()
        hw_firmware_type = image_meta.properties.get('hw_firmware_type')
        if hw_firmware_type == fields.FirmwareType.UEFI:
            extra_specs.firmware = 'efi'
        elif hw_firmware_type == fields.FirmwareType.BIOS:
            extra_specs.firmware = 'bios'
        hw_version = flavor.extra_specs.get('vmware:hw_version')
        extra_specs.hw_version = hw_version

        video_ram = image_meta.properties.get('hw_video_ram', 0)
        max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))

        if video_ram > max_vram:
            raise exception.RequestedVRamTooHigh(req_vram=video_ram,
                                                 max_vram=max_vram)
        if video_ram and max_vram:
            extra_specs.hw_video_ram = video_ram * units.Mi / units.Ki

        if CONF.vmware.pbm_enabled:
            storage_policy = flavor.extra_specs.get('vmware:storage_policy',
                CONF.vmware.pbm_default_policy)
            extra_specs.storage_policy = storage_policy
        topology = hardware.get_best_cpu_topology(flavor, image_meta,
                                                  allow_threads=False)
        extra_specs.cores_per_socket = topology.cores
        return extra_specs

    def _get_esx_host_and_cookies(self, datastore, dc_path, file_path):
        hosts = datastore.get_connected_hosts(self._session)
        host = ds_obj.Datastore.choose_host(hosts)
        host_name = self._session._call_method(vutil, 'get_object_property',
                                               host, 'name')
        url = ds_obj.DatastoreURL('https', host_name, file_path, dc_path,
                                  datastore.name)
        cookie_header = url.get_transfer_ticket(self._session, 'PUT')
        name, value = cookie_header.split('=')
        # TODO(rgerganov): this is a hack to emulate cookiejar until we fix
        # oslo.vmware to accept plain http headers
        Cookie = collections.namedtuple('Cookie', ['name', 'value'])
        return host_name, [Cookie(name, value)]

    def _fetch_vsphere_image(self, context, vi, image_ds_loc):
        """Fetch image which is located on a vSphere datastore."""
        location = vi.ii.vsphere_location
        LOG.debug("Using vSphere location: %s", location)

        LOG.debug("Copying image file data %(image_id)s to "
                  "%(file_path)s on the data store "
                  "%(datastore_name)s",
                  {'image_id': vi.ii.image_id,
                   'file_path': image_ds_loc,
                   'datastore_name': vi.datastore.name},
                  instance=vi.instance)

        location_url = ds_obj.DatastoreURL.urlparse(location)
        datacenter_path = location_url.datacenter_path
        datacenter_moref = ds_util.get_datacenter_ref(
            self._session, datacenter_path)

        datastore_name = location_url.datastore_name
        src_path = ds_obj.DatastorePath(datastore_name, location_url.path)
        ds_util.file_copy(
            self._session, str(src_path), datacenter_moref,
            str(image_ds_loc), vi.dc_info.ref)

        LOG.debug("Copied image file data %(image_id)s to "
                  "%(file_path)s on the data store "
                  "%(datastore_name)s",
                  {'image_id': vi.ii.image_id,
                   'file_path': image_ds_loc,
                   'datastore_name': vi.datastore.name},
                  instance=vi.instance)

    def _fetch_image_as_file(self, context, vi, image_ds_loc):
        """Download image as an individual file to host via HTTP PUT."""
        session = self._session

        LOG.debug("Downloading image file data %(image_id)s to "
                  "%(file_path)s on the data store "
                  "%(datastore_name)s",
                  {'image_id': vi.ii.image_id,
                   'file_path': image_ds_loc,
                   'datastore_name': vi.datastore.name},
                  instance=vi.instance)

        # try to get esx cookie to upload
        try:
            dc_path = 'ha-datacenter'
            host, cookies = self._get_esx_host_and_cookies(vi.datastore,
                dc_path, image_ds_loc.rel_path)
        except Exception as e:
            LOG.warning("Get esx cookies failed: %s", e,
                        instance=vi.instance)
            dc_path = vutil.get_inventory_path(session.vim, vi.dc_info.ref)

            host = self._session._host
            cookies = session.vim.client.cookiejar

        images.fetch_image(
            context,
            vi.instance,
            host,
            session._port,
            dc_path,
            vi.datastore.name,
            image_ds_loc.rel_path,
            cookies=cookies)

    def _fetch_image_as_vapp(self, context, vi, image_ds_loc):
        """Download stream optimized image to host as a vApp."""

        # The directory of the imported disk is the unique name
        # of the VM used to import it.
        vm_name = image_ds_loc.parent.basename

        LOG.debug("Downloading stream optimized image %(image_id)s to "
                  "%(file_path)s on the data store "
                  "%(datastore_name)s as vApp",
                  {'image_id': vi.ii.image_id,
                   'file_path': image_ds_loc,
                   'datastore_name': vi.datastore.name},
                  instance=vi.instance)

        image_size = images.fetch_image_stream_optimized(
            context,
            vi.instance,
            self._session,
            vm_name,
            vi.datastore.name,
            vi.dc_info.vmFolder,
            self._root_resource_pool)
        # The size of the image is different from the size of the virtual
        # disk. We want to use the latter. On vSAN this is the only way to
        # get this size because there is no VMDK descriptor.
        vi.ii.file_size = image_size

    def _fetch_image_as_ova(self, context, vi, image_ds_loc):
        """Download root disk of an OVA image as streamOptimized."""

        # The directory of the imported disk is the unique name
        # of the VM used to import it.
        vm_name = image_ds_loc.parent.basename

        image_size = images.fetch_image_ova(context,
                                            vi.instance,
                                            self._session,
                                            vm_name,
                                            vi.datastore.name,
                                            vi.dc_info.vmFolder,
                                            self._root_resource_pool)
        # The size of the image is different from the size of the virtual
        # disk. We want to use the latter. On vSAN this is the only way to
        # get this size because there is no VMDK descriptor.
        vi.ii.file_size = image_size

    def _prepare_sparse_image(self, vi):
        tmp_dir_loc = vi.datastore.build_path(
            self._tmp_folder, uuidutils.generate_uuid())
        tmp_image_ds_loc = tmp_dir_loc.join(
            vi.ii.image_id, "tmp-sparse.vmdk")
        ds_util.mkdir(self._session, tmp_image_ds_loc.parent, vi.dc_info.ref)
        return tmp_dir_loc, tmp_image_ds_loc

    def _prepare_flat_image(self, vi):
        tmp_dir_loc = vi.datastore.build_path(
            self._tmp_folder, uuidutils.generate_uuid())
        tmp_image_ds_loc = tmp_dir_loc.join(
            vi.ii.image_id, vi.cache_image_path.basename)
        ds_util.mkdir(self._session, tmp_image_ds_loc.parent, vi.dc_info.ref)
        vm_util.create_virtual_disk(
            self._session, vi.dc_info.ref,
            vi.ii.adapter_type,
            vi.ii.disk_type,
            str(tmp_image_ds_loc),
            vi.ii.file_size_in_kb)
        flat_vmdk_name = vi.cache_image_path.basename.replace('.vmdk',
                                                              '-flat.vmdk')
        flat_vmdk_ds_loc = tmp_dir_loc.join(vi.ii.image_id, flat_vmdk_name)
        self._delete_datastore_file(str(flat_vmdk_ds_loc), vi.dc_info.ref)
        return tmp_dir_loc, flat_vmdk_ds_loc

    def _prepare_stream_optimized_image(self, vi):
        vm_name = "%s_%s" % (constants.IMAGE_VM_PREFIX,
                             uuidutils.generate_uuid())
        tmp_dir_loc = vi.datastore.build_path(vm_name)
        tmp_image_ds_loc = tmp_dir_loc.join("%s.vmdk" % tmp_dir_loc.basename)
        return tmp_dir_loc, tmp_image_ds_loc

    def _prepare_iso_image(self, vi):
        tmp_dir_loc = vi.datastore.build_path(
            self._tmp_folder, uuidutils.generate_uuid())
        tmp_image_ds_loc = tmp_dir_loc.join(
            vi.ii.image_id, vi.cache_image_path.basename)
        return tmp_dir_loc, tmp_image_ds_loc

    def _move_to_cache(self, dc_ref, src_folder_ds_path, dst_folder_ds_path):
        try:
            ds_util.file_move(self._session, dc_ref,
                              src_folder_ds_path, dst_folder_ds_path)
        except vexc.FileAlreadyExistsException:
            # Folder move has failed. This may be due to the fact that a
            # process or thread has already completed the operation.
            # Since image caching is synchronized, this can only happen
            # due to action external to the process.
            # In the event of a FileAlreadyExists we continue,
            # all other exceptions will be raised.
            LOG.warning("Destination %s already exists! Concurrent moves "
                        "can lead to unexpected results.",
                        dst_folder_ds_path)

    def _cache_sparse_image(self, vi, tmp_image_ds_loc):
        tmp_dir_loc = tmp_image_ds_loc.parent.parent
        converted_image_ds_loc = tmp_dir_loc.join(
            vi.ii.image_id, vi.cache_image_path.basename)
        # converts fetched image to preallocated disk
        vm_util.copy_virtual_disk(
            self._session,
            vi.dc_info.ref,
            str(tmp_image_ds_loc),
            str(converted_image_ds_loc))

        self._delete_datastore_file(str(tmp_image_ds_loc), vi.dc_info.ref)

        self._move_to_cache(vi.dc_info.ref,
                            tmp_image_ds_loc.parent,
                            vi.cache_image_folder)

    def _cache_flat_image(self, vi, tmp_image_ds_loc):
        self._move_to_cache(vi.dc_info.ref,
                            tmp_image_ds_loc.parent,
                            vi.cache_image_folder)

    def _cache_stream_optimized_image(self, vi, tmp_image_ds_loc):
        dst_path = vi.cache_image_folder.join("%s.vmdk" % vi.ii.image_id)
        ds_util.mkdir(self._session, vi.cache_image_folder, vi.dc_info.ref)
        try:
            ds_util.disk_move(self._session, vi.dc_info.ref,
                              tmp_image_ds_loc, dst_path)
        except vexc.FileAlreadyExistsException:
            pass

    def _cache_iso_image(self, vi, tmp_image_ds_loc):
        self._move_to_cache(vi.dc_info.ref,
                            tmp_image_ds_loc.parent,
                            vi.cache_image_folder)

    def _get_vm_config_info(self, instance, image_info,
                            extra_specs):
        """Captures all relevant information from the spawn parameters."""

        if (instance.flavor.root_gb != 0 and
                image_info.file_size > instance.flavor.root_gb * units.Gi):
            reason = _("Image disk size greater than requested disk size")
            raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                                 reason=reason)
        allowed_ds_types = ds_util.get_allowed_datastore_types(
            image_info.disk_type)
        datastore = ds_util.get_datastore(self._session,
                                          self._cluster,
                                          self._datastore_regex,
                                          extra_specs.storage_policy,
                                          allowed_ds_types)
        dc_info = self.get_datacenter_ref_and_name(datastore.ref)

        return VirtualMachineInstanceConfigInfo(instance,
                                                image_info,
                                                datastore,
                                                dc_info,
                                                self._imagecache,
                                                extra_specs)

    def _get_image_callbacks(self, vi):
        disk_type = vi.ii.disk_type

        if vi.ii.is_ova:
            image_fetch = self._fetch_image_as_ova
        elif disk_type == constants.DISK_TYPE_STREAM_OPTIMIZED:
            image_fetch = self._fetch_image_as_vapp
        elif vi.ii.vsphere_location:
            image_fetch = self._fetch_vsphere_image
        else:
            image_fetch = self._fetch_image_as_file

        if vi.ii.is_iso:
            image_prepare = self._prepare_iso_image
            image_cache = self._cache_iso_image
        elif disk_type == constants.DISK_TYPE_SPARSE:
            image_prepare = self._prepare_sparse_image
            image_cache = self._cache_sparse_image
        elif disk_type == constants.DISK_TYPE_STREAM_OPTIMIZED:
            image_prepare = self._prepare_stream_optimized_image
            image_cache = self._cache_stream_optimized_image
        elif disk_type in constants.SUPPORTED_FLAT_VARIANTS:
            image_prepare = self._prepare_flat_image
            image_cache = self._cache_flat_image
        else:
            reason = _("disk type '%s' not supported") % disk_type
            raise exception.InvalidDiskInfo(reason=reason)
        return image_prepare, image_fetch, image_cache

    def _fetch_image_if_missing(self, context, vi):
        image_prepare, image_fetch, image_cache = self._get_image_callbacks(vi)
        LOG.debug("Processing image %s", vi.ii.image_id, instance=vi.instance)

        with lockutils.lock(str(vi.cache_image_path),
                            lock_file_prefix='nova-vmware-fetch_image'):
            self.check_cache_folder(vi.datastore.name, vi.datastore.ref)
            ds_browser = self._get_ds_browser(vi.datastore.ref)
            if not ds_util.file_exists(self._session, ds_browser,
                                       vi.cache_image_folder,
                                       vi.cache_image_path.basename):
                LOG.debug("Preparing fetch location", instance=vi.instance)
                tmp_dir_loc, tmp_image_ds_loc = image_prepare(vi)
                LOG.debug("Fetch image to %s", tmp_image_ds_loc,
                          instance=vi.instance)
                image_fetch(context, vi, tmp_image_ds_loc)
                LOG.debug("Caching image", instance=vi.instance)
                image_cache(vi, tmp_image_ds_loc)
                LOG.debug("Cleaning up location %s", str(tmp_dir_loc),
                          instance=vi.instance)
                self._delete_datastore_file(str(tmp_dir_loc), vi.dc_info.ref)

            # The size of the sparse image is different from the size of the
            # virtual disk. We want to use the latter.
            if vi.ii.disk_type == constants.DISK_TYPE_SPARSE:
                self._update_image_size(vi)

    def _create_and_attach_thin_disk(self, instance, vm_ref, dc_info, size,
                                     adapter_type, path):
        disk_type = constants.DISK_TYPE_THIN
        vm_util.create_virtual_disk(
            self._session, dc_info.ref,
            adapter_type,
            disk_type,
            path,
            size)

        self._volumeops.attach_disk_to_vm(
            vm_ref, instance,
            adapter_type, disk_type,
            path, size, False)

    def _create_ephemeral(self, bdi, instance, vm_ref, dc_info,
                          datastore, folder, adapter_type):
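        # NOTE: ephemeral sizes are given in GiB and converted to KiB
        # (via units.Mi) for the virtual disk create call.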
        ephemerals = None
        if bdi is not None:
            ephemerals = driver.block_device_info_get_ephemerals(bdi)
            for idx, eph in enumerate(ephemerals):
                size = eph['size'] * units.Mi
                at = eph.get('disk_bus') or adapter_type
                filename = vm_util.get_ephemeral_name(idx)
                path = str(ds_obj.DatastorePath(datastore.name, folder,
                                                filename))
                self._create_and_attach_thin_disk(instance, vm_ref, dc_info,
                                                  size, at, path)

        # There may be block devices defined but no ephemerals. In this case
        # we need to allocate an ephemeral disk if required
        if not ephemerals and instance.flavor.ephemeral_gb:
            size = instance.flavor.ephemeral_gb * units.Mi
            filename = vm_util.get_ephemeral_name(0)
            path = str(ds_obj.DatastorePath(datastore.name, folder,
                                            filename))
            self._create_and_attach_thin_disk(instance, vm_ref, dc_info, size,
                                              adapter_type, path)

    def _create_swap(self, bdi, instance, vm_ref, dc_info, datastore,
                     folder, adapter_type):
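        # NOTE: swap sizes are given in MB and converted to KB (via
        # units.Ki) for the virtual disk create call.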
        swap = None
        filename = "swap.vmdk"
        path = str(ds_obj.DatastorePath(datastore.name, folder, filename))
        if bdi is not None:
            swap = driver.block_device_info_get_swap(bdi)
            if driver.swap_is_usable(swap):
                size = swap['swap_size'] * units.Ki
                self._create_and_attach_thin_disk(instance, vm_ref, dc_info,
                                                  size, adapter_type, path)
            else:
                # driver.block_device_info_get_swap returns
                # {'device_name': None, 'swap_size': 0} if swap is None
                # in block_device_info. If block_device_info does not contain
                # a swap device, we need to reset swap to None, so we can
                # extract the swap_size from the instance's flavor.
                swap = None

        size = instance.flavor.swap * units.Ki
        if not swap and size > 0:
            self._create_and_attach_thin_disk(instance, vm_ref, dc_info, size,
                                              adapter_type, path)

    def _update_vnic_index(self, context, instance, network_info):
        if network_info:
            for index, vif in enumerate(network_info):
                self._network_api.update_instance_vnic_index(
                    context, instance, vif, index)

    def _update_image_size(self, vi):
        """Updates the file size of the specified image."""
        # The size of the Glance image is different from the deployed VMDK
        # size for sparse, streamOptimized and OVA images. We need to retrieve
        # the size of the flat VMDK and update the file_size property of the
        # image. This ensures that further operations involving size checks
        # and disk resizing will work as expected.
        ds_browser = self._get_ds_browser(vi.datastore.ref)
        flat_file = "%s-flat.vmdk" % vi.ii.image_id
        new_size = ds_util.file_size(self._session, ds_browser,
                                     vi.cache_image_folder, flat_file)
        if new_size is not None:
            vi.ii.file_size = new_size

    def prepare_for_spawn(self, instance):
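        # NOTE: vSphere requires the configured memory size to be a
        # multiple of 4 MB, so reject non-conforming flavors up front.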
        if (int(instance.flavor.memory_mb) % 4 != 0):
            reason = _("Memory size is not multiple of 4")
            raise exception.InstanceUnacceptable(instance_id=instance.uuid,
                                                 reason=reason)

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):

        client_factory = self._session.vim.client.factory
        image_info = images.VMwareImage.from_image(context,
                                                   instance.image_ref,
                                                   image_meta)
        extra_specs = self._get_extra_specs(instance.flavor, image_meta)

        vi = self._get_vm_config_info(instance, image_info,
                                      extra_specs)

        metadata = self._get_instance_metadata(context, instance)
        # Creates the virtual machine. The virtual machine reference returned
        # is unique within Virtual Center.
        vm_ref = self.build_virtual_machine(instance,
                                            image_info,
                                            vi.dc_info,
                                            vi.datastore,
                                            network_info,
                                            extra_specs,
                                            metadata)

        # Cache the vm_ref. This saves a remote call to the VC. This uses the
        # instance uuid.
        vm_util.vm_ref_cache_update(instance.uuid, vm_ref)

        # Update the Neutron VNIC index
        self._update_vnic_index(context, instance, network_info)

        # Set the machine.id parameter of the instance to inject
        # the NIC configuration inside the VM
        if CONF.flat_injected:
            self._set_machine_id(client_factory, instance, network_info,
                                 vm_ref=vm_ref)

        # Set the vnc configuration of the instance, vnc port starts from 5900
        if CONF.vnc.enabled:
            self._get_and_set_vnc_config(client_factory, instance, vm_ref)

        block_device_mapping = []
        if block_device_info is not None:
            block_device_mapping = driver.block_device_info_get_mapping(
                block_device_info)

        if instance.image_ref:
            self._imagecache.enlist_image(
                image_info.image_id, vi.datastore, vi.dc_info.ref)
            self._fetch_image_if_missing(context, vi)

            if image_info.is_iso:
                self._use_iso_image(vm_ref, vi)
            elif image_info.linked_clone:
                self._use_disk_image_as_linked_clone(vm_ref, vi)
            else:
                self._use_disk_image_as_full_clone(vm_ref, vi)

        if block_device_mapping:
            msg = "Block device information present: %s" % block_device_info
            # NOTE(mriedem): block_device_info can contain an auth_password
            # so we have to scrub the message before logging it.
            LOG.debug(strutils.mask_password(msg), instance=instance)

            # Before attempting to attach any volume, make sure the
            # block_device_mapping (i.e. disk_bus) is valid
            self._is_bdm_valid(block_device_mapping)

            for disk in block_device_mapping:
                connection_info = disk['connection_info']
                adapter_type = disk.get('disk_bus') or vi.ii.adapter_type

                # TODO(hartsocks): instance is unnecessary, remove it
                # we still use instance in many locations for no other purpose
                # than logging, can we simplify this?
                if disk.get('boot_index') == 0:
                    self._volumeops.attach_root_volume(connection_info,
                        instance, vi.datastore.ref, adapter_type)
                else:
                    self._volumeops.attach_volume(connection_info,
                        instance, adapter_type)

        # Create ephemeral disks
        self._create_ephemeral(block_device_info, instance, vm_ref,
                               vi.dc_info, vi.datastore, instance.uuid,
                               vi.ii.adapter_type)
        self._create_swap(block_device_info, instance, vm_ref, vi.dc_info,
                          vi.datastore, instance.uuid, vi.ii.adapter_type)

        if configdrive.required_by(instance):
            self._configure_config_drive(
                context, instance, vm_ref, vi.dc_info, vi.datastore,
                injected_files, admin_password, network_info)

        # Rename the VM. This is done after the spec is created to ensure
        # that all of the files for the instance are under the directory
        # 'uuid' of the instance
        vm_util.rename_vm(self._session, vm_ref, instance)

        vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)

    def _is_bdm_valid(self, block_device_mapping):
        """Checks if the block device mapping is valid."""
        valid_bus = (constants.DEFAULT_ADAPTER_TYPE,
                     constants.ADAPTER_TYPE_BUSLOGIC,
                     constants.ADAPTER_TYPE_IDE,
                     constants.ADAPTER_TYPE_LSILOGICSAS,
                     constants.ADAPTER_TYPE_PARAVIRTUAL)

        for disk in block_device_mapping:
            adapter_type = disk.get('disk_bus')
            if (adapter_type is not None and adapter_type not in valid_bus):
                raise exception.UnsupportedHardware(model=adapter_type,
                                                    virt="vmware")

    def _create_config_drive(self, context, instance, injected_files,
                             admin_password, network_info, data_store_name,
                             dc_name, upload_folder, cookies):
        if CONF.config_drive_format != 'iso9660':
            reason = (_('Invalid config_drive_format "%s"') %
                      CONF.config_drive_format)
            raise exception.InstancePowerOnFailure(reason=reason)

        LOG.info('Using config drive for instance', instance=instance)
        extra_md = {}
        if admin_password:
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(instance,
                                                     content=injected_files,
                                                     extra_md=extra_md,
                                                     network_info=network_info)
        try:
            with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
                with utils.tempdir() as tmp_path:
                    tmp_file = os.path.join(tmp_path, 'configdrive.iso')
                    cdb.make_drive(tmp_file)
                    upload_iso_path = "%s/configdrive.iso" % (
                        upload_folder)
                    images.upload_iso_to_datastore(
                        tmp_file, instance,
                        host=self._session._host,
                        port=self._session._port,
                        data_center_name=dc_name,
                        datastore_name=data_store_name,
                        cookies=cookies,
                        file_path=upload_iso_path)
                    return upload_iso_path
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Creating config drive failed with error: %s',
                          e, instance=instance)

    def _attach_cdrom_to_vm(self, vm_ref, instance,
                            datastore, file_path):
        """Attach cdrom to VM by reconfiguration."""
        client_factory = self._session.vim.client.factory
        devices = vm_util.get_hardware_devices(self._session, vm_ref)
        (controller_key, unit_number,
         controller_spec) = vm_util.allocate_controller_key_and_unit_number(
            client_factory,
            devices,
            constants.ADAPTER_TYPE_IDE)
        cdrom_attach_config_spec = vm_util.get_cdrom_attach_config_spec(
            client_factory, datastore, file_path,
            controller_key, unit_number)
        if controller_spec:
            cdrom_attach_config_spec.deviceChange.append(controller_spec)

        LOG.debug("Reconfiguring VM instance to attach cdrom %s",
                  file_path, instance=instance)
        vm_util.reconfigure_vm(self._session, vm_ref, cdrom_attach_config_spec)
        LOG.debug("Reconfigured VM instance to attach cdrom %s",
                  file_path, instance=instance)

    def _create_vm_snapshot(self, instance, vm_ref):
        LOG.debug("Creating Snapshot of the VM instance", instance=instance)
        snapshot_task = self._session._call_method(
            self._session.vim,
            "CreateSnapshot_Task", vm_ref,
            name="%s-snapshot" % instance.uuid,
            description="Taking Snapshot of the VM",
            memory=False,
            quiesce=True)
        self._session._wait_for_task(snapshot_task)
        LOG.debug("Created Snapshot of the VM instance", instance=instance)
        task_info = self._session._call_method(vutil,
                                               "get_object_property",
                                               snapshot_task,
                                               "info")
        snapshot = task_info.result
        return snapshot

    @retry_if_task_in_progress
    def _delete_vm_snapshot(self, instance, vm_ref, snapshot):
        LOG.debug("Deleting Snapshot of the VM instance", instance=instance)
        delete_snapshot_task = self._session._call_method(
            self._session.vim,
            "RemoveSnapshot_Task", snapshot,
            removeChildren=False, consolidate=True)
        self._session._wait_for_task(delete_snapshot_task)
        LOG.debug("Deleted Snapshot of the VM instance", instance=instance)

    def _create_linked_clone_from_snapshot(self, instance,
                                           vm_ref, snapshot_ref, dc_info):
        """Create linked clone VM to be deployed to same ds as source VM
        """
        client_factory = self._session.vim.client.factory
        rel_spec = vm_util.relocate_vm_spec(
            client_factory,
            datastore=None,
            host=None,
            disk_move_type="createNewChildDiskBacking")
        clone_spec = vm_util.clone_vm_spec(client_factory, rel_spec,
            power_on=False, snapshot=snapshot_ref, template=True)
        vm_name = "%s_%s" % (constants.SNAPSHOT_VM_PREFIX,
                             uuidutils.generate_uuid())

        LOG.debug("Creating linked-clone VM from snapshot", instance=instance)
        vm_clone_task = self._session._call_method(
            self._session.vim,
            "CloneVM_Task",
            vm_ref,
            folder=dc_info.vmFolder,
            name=vm_name,
            spec=clone_spec)
        self._session._wait_for_task(vm_clone_task)
        LOG.info("Created linked-clone VM from snapshot", instance=instance)
        task_info = self._session._call_method(vutil,
                                               "get_object_property",
                                               vm_clone_task,
                                               "info")
        return task_info.result

    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance.

        Steps followed are:

        1. Get the name of the vmdk file which the VM points to right now.
           Can be a chain of snapshots, so we need to know the last in the
           chain.
        2. Create the snapshot. A new vmdk is created which the VM points to
           now. The earlier vmdk becomes read-only.
        3. Create a linked clone VM from the snapshot.
        4. Export the disk in the linked clone VM as a streamOptimized disk.
        5. Delete the linked clone VM.
        6. Delete the snapshot in the original instance.
        """
        vm_ref = vm_util.get_vm_ref(self._session, instance)

        def _get_vm_and_vmdk_attribs():
            # Get the vmdk info that the VM is pointing to
            vmdk = vm_util.get_vmdk_info(self._session, vm_ref)
            if not vmdk.path:
                LOG.debug("No root disk defined. Unable to snapshot.",
                          instance=instance)
                raise error_util.NoRootDiskDefined()

            lst_properties = ["datastore", "summary.config.guestId"]
            props = self._session._call_method(vutil,
                                               "get_object_properties_dict",
                                               vm_ref,
                                               lst_properties)
            os_type = props['summary.config.guestId']
            datastores = props['datastore']
            return (vmdk, datastores, os_type)

        vmdk, datastores, os_type = _get_vm_and_vmdk_attribs()
        ds_ref = datastores.ManagedObjectReference[0]
        dc_info = self.get_datacenter_ref_and_name(ds_ref)

        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)

        # TODO(vui): convert to creating plain vm clone and uploading from it
        # instead of using live vm snapshot.
        snapshot_ref = self._create_vm_snapshot(instance, vm_ref)

        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)
        snapshot_vm_ref = None

        try:
            # Create a temporary VM (linked clone from snapshot), then export
            # the VM's root disk to glance via HttpNfc API
            snapshot_vm_ref = self._create_linked_clone_from_snapshot(
                instance, vm_ref, snapshot_ref, dc_info)
            images.upload_image_stream_optimized(
                context, image_id, instance, self._session, vm=snapshot_vm_ref,
                vmdk_size=vmdk.capacity_in_bytes)
        finally:
            if snapshot_vm_ref:
                vm_util.destroy_vm(self._session, instance, snapshot_vm_ref)
            # Deleting the snapshot after destroying the temporary VM created
            # based on it allows the instance vm's disks to be consolidated.
            # TODO(vui) Add handling for when vmdk volume is attached.
            self._delete_vm_snapshot(instance, vm_ref, snapshot_ref)

    def reboot(self, instance, network_info, reboot_type="SOFT"):
        """Reboot a VM instance."""
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
                          "summary.guest.toolsRunningStatus"]
        props = self._session._call_method(vutil,
                                           "get_object_properties_dict",
                                           vm_ref,
                                           lst_properties)
        pwr_state = props.get('runtime.powerState')
        tools_status = props.get('summary.guest.toolsStatus')
        tools_running_status = props.get('summary.guest.toolsRunningStatus')

        # Raise an exception if the VM is not powered On.
        if pwr_state not in ["poweredOn"]:
            reason = _("instance is not powered on")
            raise exception.InstanceRebootFailure(reason=reason)

        # If the latest VMware Tools are installed in the VM and the tools
        # are running, do only a guest reboot. Otherwise do a hard reset.
        if (tools_status == "toolsOk" and
                tools_running_status == "guestToolsRunning" and
                reboot_type == "SOFT"):
            LOG.debug("Rebooting guest OS of VM", instance=instance)
            self._session._call_method(self._session.vim, "RebootGuest",
                                       vm_ref)
            LOG.debug("Rebooted guest OS of VM", instance=instance)
        else:
            LOG.debug("Doing hard reboot of VM", instance=instance)
            reset_task = self._session._call_method(self._session.vim,
                                                    "ResetVM_Task", vm_ref)
            self._session._wait_for_task(reset_task)
            LOG.debug("Did hard reboot of VM", instance=instance)

    def _destroy_instance(self, instance, destroy_disks=True):
        # Destroy a VM instance
        try:
            vm_ref = vm_util.get_vm_ref(self._session, instance)
            lst_properties = ["config.files.vmPathName", "runtime.powerState",
                              "datastore"]
            props = self._session._call_method(vutil,
                                               "get_object_properties_dict",
                                               vm_ref,
                                               lst_properties)
            pwr_state = props['runtime.powerState']

            vm_config_pathname = props.get('config.files.vmPathName')
            vm_ds_path = None
            if vm_config_pathname is not None:
                vm_ds_path = ds_obj.DatastorePath.parse(
                    vm_config_pathname)

            # Power off the VM if it is in PoweredOn state.
            if pwr_state == "poweredOn":
                vm_util.power_off_instance(self._session, instance, vm_ref)

            # Un-register the VM
            try:
                LOG.debug("Unregistering the VM", instance=instance)
                self._session._call_method(self._session.vim,
                                           "UnregisterVM", vm_ref)
                LOG.debug("Unregistered the VM", instance=instance)
            except Exception as excep:
                LOG.warning("In vmwareapi:vmops:_destroy_instance, got "
                            "this exception while un-registering the VM: %s",
                            excep, instance=instance)
            # Delete the folder holding the VM related content on
            # the datastore.
            if destroy_disks and vm_ds_path:
                try:
                    dir_ds_compliant_path = vm_ds_path.parent
                    LOG.debug("Deleting contents of the VM from "
                              "datastore %(datastore_name)s",
                              {'datastore_name': vm_ds_path.datastore},
                              instance=instance)
                    ds_ref_ret = props['datastore']
                    ds_ref = ds_ref_ret.ManagedObjectReference[0]
                    dc_info = self.get_datacenter_ref_and_name(ds_ref)
                    ds_util.file_delete(self._session,
                                        dir_ds_compliant_path,
                                        dc_info.ref)
                    LOG.debug("Deleted contents of the VM from "
                              "datastore %(datastore_name)s",
                              {'datastore_name': vm_ds_path.datastore},
                              instance=instance)
                except Exception:
                    LOG.warning("In vmwareapi:vmops:_destroy_instance, "
                                "exception while deleting the VM contents "
                                "from the disk",
                                exc_info=True, instance=instance)
        except exception.InstanceNotFound:
            LOG.warning('Instance does not exist on backend',
                        instance=instance)
        except Exception:
            LOG.exception('Destroy instance failed', instance=instance)
        finally:
            vm_util.vm_ref_cache_delete(instance.uuid)

    def destroy(self, instance, destroy_disks=True):
        """Destroy a VM instance.

        Steps followed for each VM are:

        1. Power off, if it is in poweredOn state.
        2. Un-register.
        3. Delete the contents of the folder holding the VM related data.
        """
        LOG.debug("Destroying instance", instance=instance)
        self._destroy_instance(instance, destroy_disks=destroy_disks)
        LOG.debug("Instance destroyed", instance=instance)

    def pause(self, instance):
        msg = _("pause not supported for vmwareapi")
        raise NotImplementedError(msg)

    def unpause(self, instance):
        msg = _("unpause not supported for vmwareapi")
        raise NotImplementedError(msg)

    def suspend(self, instance):
        """Suspend the specified instance."""
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        pwr_state = self._session._call_method(vutil,
                                               "get_object_property",
                                               vm_ref,
                                               "runtime.powerState")
        # Only PoweredOn VMs can be suspended.
        if pwr_state == "poweredOn":
            LOG.debug("Suspending the VM", instance=instance)
            suspend_task = self._session._call_method(self._session.vim,
                                                      "SuspendVM_Task", vm_ref)
            self._session._wait_for_task(suspend_task)
            LOG.debug("Suspended the VM", instance=instance)
        # Raise Exception if VM is poweredOff
        elif pwr_state == "poweredOff":
            reason = _("instance is powered off and cannot be suspended.")
            raise exception.InstanceSuspendFailure(reason=reason)
        else:
            LOG.debug("VM was already in suspended state, so returning "
                      "without doing anything", instance=instance)

    def resume(self, instance):
        """Resume the specified instance."""
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        pwr_state = self._session._call_method(vutil,
                                               "get_object_property",
                                               vm_ref,
                                               "runtime.powerState")
        if pwr_state.lower() == "suspended":
            LOG.debug("Resuming the VM", instance=instance)
            suspend_task = self._session._call_method(
                self._session.vim,
                "PowerOnVM_Task", vm_ref)
            self._session._wait_for_task(suspend_task)
            LOG.debug("Resumed the VM", instance=instance)
        else:
            reason = _("instance is not in a suspended state")
            raise exception.InstanceResumeFailure(reason=reason)

    def _get_rescue_device(self, instance, vm_ref):
        hardware_devices = vm_util.get_hardware_devices(self._session, vm_ref)
        return vm_util.find_rescue_device(hardware_devices,
                                          instance)

    def rescue(self, context, instance, network_info, image_meta):
        """Rescue the specified instance.

        Attach the image that the instance was created from and boot from it.
        """
        vm_ref = vm_util.get_vm_ref(self._session, instance)

        # Get the root disk vmdk object
        vmdk = vm_util.get_vmdk_info(self._session, vm_ref)
        ds_ref = vmdk.device.backing.datastore
        datastore = ds_obj.get_datastore_by_ref(self._session, ds_ref)
        dc_info = self.get_datacenter_ref_and_name(datastore.ref)

        # Get the image details of the instance
        image_info = images.VMwareImage.from_image(context,
                                                   image_meta.id,
                                                   image_meta)
        vi = VirtualMachineInstanceConfigInfo(instance,
                                              image_info,
                                              datastore,
                                              dc_info,
                                              self._imagecache)
        vm_util.power_off_instance(self._session, instance, vm_ref)

        # Fetch the image if it does not exist in the cache
        self._fetch_image_if_missing(context, vi)

        # Get the rescue disk path
        vm_folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
        rescue_disk_path = datastore.build_path(vm_folder,
            "%s-rescue.%s" % (image_info.image_id, image_info.file_type))

        # Copy the cached image to be the rescue disk. This will be used
        # as the rescue disk for the instance.
        ds_util.disk_copy(self._session, dc_info.ref,
                          vi.cache_image_path, rescue_disk_path)
        # Attach the rescue disk to the instance
        self._volumeops.attach_disk_to_vm(vm_ref, instance, vmdk.adapter_type,
                                          vmdk.disk_type, rescue_disk_path)
        # Get the rescue device and configure the boot order to
        # boot from this device
        rescue_device = self._get_rescue_device(instance, vm_ref)
        factory = self._session.vim.client.factory
        boot_spec = vm_util.get_vm_boot_spec(factory, rescue_device)
        # Update the VM with the new boot order and power on
        vm_util.reconfigure_vm(self._session, vm_ref, boot_spec)
        vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)

    def unrescue(self, instance, power_on=True):
        """Unrescue the specified instance."""

        vm_ref = vm_util.get_vm_ref(self._session, instance)
        # Get the rescue device and detach it from the instance.
        try:
            rescue_device = self._get_rescue_device(instance, vm_ref)
        except exception.NotFound:
            with excutils.save_and_reraise_exception():
                LOG.error('Unable to access the rescue disk',
                          instance=instance)
        vm_util.power_off_instance(self._session, instance, vm_ref)
        self._volumeops.detach_disk_from_vm(vm_ref, instance, rescue_device,
                                            destroy_disk=True)
        if power_on:
            vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)

    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance.

        :param instance: nova.objects.instance.Instance
        :param timeout: How long to wait in seconds for the instance to
                        shutdown
        :param retry_interval: Interval to check if instance is already
                               shutdown in seconds.
        """
        if timeout and self._clean_shutdown(instance,
                                            timeout,
                                            retry_interval):
            return

        vm_util.power_off_instance(self._session, instance)

    def _clean_shutdown(self, instance, timeout, retry_interval):
        """Perform a soft shutdown on the VM.

        :param instance: nova.objects.instance.Instance
        :param timeout: How long to wait in seconds for the instance to
                        shutdown
        :param retry_interval: Interval to check if instance is already
                               shutdown in seconds.
        :return: True if the instance was shutdown within time limit,
                 False otherwise.
        """
        LOG.debug("Performing Soft shutdown on instance",
                  instance=instance)
        vm_ref = vm_util.get_vm_ref(self._session, instance)

        props = self._get_instance_props(vm_ref)

        if props.get("runtime.powerState") != "poweredOn":
            LOG.debug("Instance not in poweredOn state.",
                      instance=instance)
            return False

        if ((props.get("summary.guest.toolsStatus") == "toolsOk") and
                (props.get("summary.guest.toolsRunningStatus") ==
                 "guestToolsRunning")):

            LOG.debug("Soft shutdown instance, timeout: %d",
                      timeout, instance=instance)
            self._session._call_method(self._session.vim,
                                       "ShutdownGuest",
                                       vm_ref)

            while timeout > 0:
                wait_time = min(retry_interval, timeout)
                props = self._get_instance_props(vm_ref)

                if props.get("runtime.powerState") == "poweredOff":
                    LOG.info("Soft shutdown succeeded.",
                             instance=instance)
                    return True

                time.sleep(wait_time)
                timeout -= retry_interval

            LOG.warning("Timed out while waiting for soft shutdown.",
                        instance=instance)
        else:
            LOG.debug("VMware Tools not running", instance=instance)

        return False

    def _get_instance_props(self, vm_ref):
        lst_properties = ["summary.guest.toolsStatus",
                          "runtime.powerState",
                          "summary.guest.toolsRunningStatus"]
        return self._session._call_method(vutil,
                                          "get_object_properties_dict",
                                          vm_ref, lst_properties)

    def power_on(self, instance):
        vm_util.power_on_instance(self._session, instance)

    def _update_instance_progress(self, context, instance, step, total_steps):
        """Update instance progress percent to reflect current step number
        """
        # Divide the action's workflow into discrete steps and "bump" the
        # instance's progress field as each step is completed.
        #
        # For a first cut this should be fine, however, for large VM images,
        # the clone disk step begins to dominate the equation. A
        # better approximation would use the percentage of the VM image that
        # has been streamed to the destination host.
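        # e.g. step 3 of RESIZE_TOTAL_STEPS (6) reports 50% progress.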
        progress = round(float(step) / total_steps * 100)
        instance_uuid = instance.uuid
        LOG.debug("Updating instance '%(instance_uuid)s' progress to"
                  " %(progress)d",
                  {'instance_uuid': instance_uuid, 'progress': progress},
                  instance=instance)
        instance.progress = progress
        instance.save()

    def _resize_vm(self, context, instance, vm_ref, flavor, image_meta):
        """Resizes the VM according to the flavor."""
        client_factory = self._session.vim.client.factory
        extra_specs = self._get_extra_specs(flavor, image_meta)
        metadata = self._get_instance_metadata(context, instance,
                                               flavor=flavor)
        vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
                                                    int(flavor.vcpus),
                                                    int(flavor.memory_mb),
                                                    extra_specs,
                                                    metadata=metadata)
        vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)

    def _resize_disk(self, instance, vm_ref, vmdk, flavor):
        extra_specs = self._get_extra_specs(instance.flavor,
                                            instance.image_meta)
        if (flavor.root_gb > instance.flavor.root_gb and
                flavor.root_gb > vmdk.capacity_in_bytes / units.Gi):
            root_disk_in_kb = flavor.root_gb * units.Mi
            ds_ref = vmdk.device.backing.datastore
            dc_info = self.get_datacenter_ref_and_name(ds_ref)
            folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
            datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
            resized_disk = str(ds_obj.DatastorePath(datastore, folder,
                                                    'resized.vmdk'))
            ds_util.disk_copy(self._session, dc_info.ref, vmdk.path,
                              str(resized_disk))
            self._extend_virtual_disk(instance, root_disk_in_kb, resized_disk,
                                      dc_info.ref)
            self._volumeops.detach_disk_from_vm(vm_ref, instance, vmdk.device)
            original_disk = str(ds_obj.DatastorePath(datastore, folder,
                                                     'original.vmdk'))
            ds_util.disk_move(self._session, dc_info.ref, vmdk.path,
                              original_disk)
            ds_util.disk_move(self._session, dc_info.ref, resized_disk,
                              vmdk.path)
        else:
            self._volumeops.detach_disk_from_vm(vm_ref, instance, vmdk.device)

        self._volumeops.attach_disk_to_vm(
            vm_ref, instance, vmdk.adapter_type, vmdk.disk_type, vmdk.path,
            disk_io_limits=extra_specs.disk_io_limits)

    def _remove_ephemerals_and_swap(self, vm_ref):
        devices = vm_util.get_ephemerals(self._session, vm_ref)
        swap = vm_util.get_swap(self._session, vm_ref)
        if swap is not None:
            devices.append(swap)

        if devices:
            vm_util.detach_devices_from_vm(self._session, vm_ref, devices)

    def _resize_create_ephemerals_and_swap(self, vm_ref, instance,
                                           block_device_info):
        vmdk = vm_util.get_vmdk_info(self._session, vm_ref)
        if not vmdk.device:
            LOG.debug("No root disk attached!", instance=instance)
            return
        ds_ref = vmdk.device.backing.datastore
        datastore = ds_obj.get_datastore_by_ref(self._session, ds_ref)
        dc_info = self.get_datacenter_ref_and_name(ds_ref)
        folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
        self._create_ephemeral(block_device_info, instance, vm_ref,
                               dc_info, datastore, folder, vmdk.adapter_type)
        self._create_swap(block_device_info, instance, vm_ref, dc_info,
                          datastore, folder, vmdk.adapter_type)

    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor):
        """Transfers the disk of a running instance in multiple phases,
        turning off the instance before the end.
        """
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        vmdk = vm_util.get_vmdk_info(self._session, vm_ref)

        # Checks if the migration needs a disk resize down.
        if (flavor.root_gb < instance.flavor.root_gb or
                (flavor.root_gb != 0 and
                 flavor.root_gb < vmdk.capacity_in_bytes / units.Gi)):
            reason = _("Unable to shrink disk.")
            raise exception.InstanceFaultRollback(
                exception.ResizeError(reason=reason))

        # TODO(garyk): treat dest parameter. Migration needs to be treated.

        # 0. Zero out the progress to begin
        self._update_instance_progress(context, instance,
                                       step=0,
                                       total_steps=RESIZE_TOTAL_STEPS)

        # 1. Power off the instance
        vm_util.power_off_instance(self._session, instance, vm_ref)
        self._update_instance_progress(context, instance,
                                       step=1,
                                       total_steps=RESIZE_TOTAL_STEPS)

        # 2. Reconfigure the VM properties
        self._resize_vm(context, instance, vm_ref, flavor, instance.image_meta)

        self._update_instance_progress(context, instance,
                                       step=2,
                                       total_steps=RESIZE_TOTAL_STEPS)

        # 3. Reconfigure the disk properties
        self._resize_disk(instance, vm_ref, vmdk, flavor)
        self._update_instance_progress(context, instance,
                                       step=3,
                                       total_steps=RESIZE_TOTAL_STEPS)

        # 4. Purge ephemeral and swap disks
        self._remove_ephemerals_and_swap(vm_ref)
        self._update_instance_progress(context, instance,
                                       step=4,
                                       total_steps=RESIZE_TOTAL_STEPS)

    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        vmdk = vm_util.get_vmdk_info(self._session, vm_ref)
        if not vmdk.device:
            return
        ds_ref = vmdk.device.backing.datastore
        dc_info = self.get_datacenter_ref_and_name(ds_ref)
        folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
        datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
        original_disk = ds_obj.DatastorePath(datastore, folder,
                                             'original.vmdk')
        ds_browser = self._get_ds_browser(ds_ref)
        if ds_util.file_exists(self._session, ds_browser,
                               original_disk.parent,
                               original_disk.basename):
            ds_util.disk_delete(self._session, dc_info.ref,
                                str(original_disk))

    def _revert_migration_update_disks(self, vm_ref, instance, vmdk,
                                       block_device_info):
        extra_specs = self._get_extra_specs(instance.flavor,
                                            instance.image_meta)
        ds_ref = vmdk.device.backing.datastore
        dc_info = self.get_datacenter_ref_and_name(ds_ref)
        folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
        datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
        original_disk = ds_obj.DatastorePath(datastore, folder,
                                             'original.vmdk')
        ds_browser = self._get_ds_browser(ds_ref)
        if ds_util.file_exists(self._session, ds_browser,
                               original_disk.parent,
                               original_disk.basename):
            self._volumeops.detach_disk_from_vm(vm_ref, instance,
                                                vmdk.device)
            ds_util.disk_delete(self._session, dc_info.ref, vmdk.path)
            ds_util.disk_move(self._session, dc_info.ref,
                              str(original_disk), vmdk.path)
        else:
            self._volumeops.detach_disk_from_vm(vm_ref, instance,
                                                vmdk.device)
        self._volumeops.attach_disk_to_vm(
            vm_ref, instance, vmdk.adapter_type, vmdk.disk_type, vmdk.path,
            disk_io_limits=extra_specs.disk_io_limits)
        # Reconfigure ephemerals
        self._remove_ephemerals_and_swap(vm_ref)
        self._resize_create_ephemerals_and_swap(vm_ref, instance,
                                                block_device_info)
1523 def finish_revert_migration(self, context, instance, network_info,
1524 block_device_info, power_on=True):
1525 """Finish reverting a resize."""
1526 vm_ref = vm_util.get_vm_ref(self._session, instance)
1527 # Ensure that the VM is off
1528 vm_util.power_off_instance(self._session, instance, vm_ref)
1529 client_factory = self._session.vim.client.factory
1530 # Reconfigure the VM properties
1531 extra_specs = self._get_extra_specs(instance.flavor,
1532 instance.image_meta)
1533 metadata = self._get_instance_metadata(context, instance)
1534 vm_resize_spec = vm_util.get_vm_resize_spec(
1535 client_factory,
1536 int(instance.flavor.vcpus),
1537 int(instance.flavor.memory_mb),
1538 extra_specs,
1539 metadata=metadata)
1540 vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)
1542 vmdk = vm_util.get_vmdk_info(self._session, vm_ref)
1543 if vmdk.device:
1544 self._revert_migration_update_disks(vm_ref, instance, vmdk,
1545 block_device_info)
1547 if power_on:
1548 vm_util.power_on_instance(self._session, instance)
1550 def finish_migration(self, context, migration, instance, disk_info,
1551 network_info, image_meta, resize_instance=False,
1552 block_device_info=None, power_on=True):
1553 """Completes a resize, turning on the migrated instance."""
1554 vm_ref = vm_util.get_vm_ref(self._session, instance)
1556 # 5. Update ephemerals if necessary
1557 self._resize_create_ephemerals_and_swap(vm_ref, instance,
1558 block_device_info)
1560 self._update_instance_progress(context, instance,
1561 step=5,
1562 total_steps=RESIZE_TOTAL_STEPS)
1563 # 6. Start VM
1564 if power_on:
1565 vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
1567 self._update_instance_progress(context, instance,
1568 step=6,
1569 total_steps=RESIZE_TOTAL_STEPS)
1571 def _find_esx_host(self, cluster_ref, ds_ref):
1572 """Find ESX host in the specified cluster which is also connected to
1573 the specified datastore.
1574 """
1575 cluster_hosts = self._session._call_method(vutil,
1576 'get_object_property',
1577 cluster_ref, 'host')
1578 ds_hosts = self._session._call_method(vutil, 'get_object_property',
1579 ds_ref, 'host')
1580 for ds_host in ds_hosts.DatastoreHostMount:
1581 ds_host_ref_value = vutil.get_moref_value(ds_host.key)
1582 for cluster_host in cluster_hosts.ManagedObjectReference:
1583 if ds_host_ref_value == vutil.get_moref_value(cluster_host):
1584 return cluster_host
1586 def _find_datastore_for_migration(self, instance, vm_ref, cluster_ref,
1587 datastore_regex):
1588 """Find datastore in the specified cluster where the instance will be
1589 migrated to. Return the current datastore if it is already connected to
1590 the specified cluster.
1591 """
1592 vmdk = vm_util.get_vmdk_info(self._session, vm_ref)
1593 ds_ref = vmdk.device.backing.datastore
1594 cluster_datastores = self._session._call_method(vutil,
1595 'get_object_property',
1596 cluster_ref,
1597 'datastore')
1598 if not cluster_datastores:
1599 LOG.warning('No datastores found in the destination cluster')
1600 return None
1601 # check if the current datastore is connected to the destination
1602 # cluster
1603 ds_ref_value = vutil.get_moref_value(ds_ref)
1604 for datastore in cluster_datastores.ManagedObjectReference:
1605 if vutil.get_moref_value(datastore) == ds_ref_value:
1606 ds = ds_obj.get_datastore_by_ref(self._session, ds_ref)
1607 if (datastore_regex is None or
1608 datastore_regex.match(ds.name)):
1609 LOG.debug('Datastore "%s" is connected to the '
1610 'destination cluster', ds.name)
1611 return ds
1612 # find the most suitable datastore on the destination cluster
1613 return ds_util.get_datastore(self._session, cluster_ref,
1614 datastore_regex)
1616 def live_migration(self, context, instance, dest,
1617 post_method, recover_method, block_migration,
1618 migrate_data):
1619 LOG.debug("Live migration data %s", migrate_data, instance=instance)
1620 vm_ref = vm_util.get_vm_ref(self._session, instance)
1621 cluster_name = migrate_data.cluster_name
1622 cluster_ref = vm_util.get_cluster_ref_by_name(self._session,
1623 cluster_name)
1624 datastore_regex = re.compile(migrate_data.datastore_regex)
1625 res_pool_ref = vm_util.get_res_pool_ref(self._session, cluster_ref)
1626 # find a datastore where the instance will be migrated to
1627 ds = self._find_datastore_for_migration(instance, vm_ref, cluster_ref,
1628 datastore_regex)
1629 if ds is None:
1630 LOG.error("Cannot find datastore", instance=instance)
1631 raise exception.HostNotFound(host=dest)
1632 LOG.debug("Migrating instance to datastore %s", ds.name,
1633 instance=instance)
1634 # find ESX host in the destination cluster which is connected to the
1635 # target datastore
1636 esx_host = self._find_esx_host(cluster_ref, ds.ref)
1637 if esx_host is None:
1638 LOG.error("Cannot find ESX host for live migration, cluster: %s, "
1639 "datastore: %s", migrate_data.cluster_name, ds.name,
1640 instance=instance)
1641 raise exception.HostNotFound(host=dest)
1642 # Update networking backings
1643 network_info = instance.get_network_info()
1644 client_factory = self._session.vim.client.factory
1645 devices = []
1646 hardware_devices = vm_util.get_hardware_devices(self._session, vm_ref)
1647 vif_model = instance.image_meta.properties.get('hw_vif_model',
1648 constants.DEFAULT_VIF_MODEL)
1649 for vif in network_info:
1650 vif_info = vmwarevif.get_vif_dict(
1651 self._session, cluster_ref, vif_model, vif)
1652 device = vmwarevif.get_network_device(hardware_devices,
1653 vif['address'])
1654 devices.append(vm_util.update_vif_spec(client_factory, vif_info,
1655 device))
1657 LOG.debug("Migrating instance to cluster '%s', datastore '%s' and "
1658 "ESX host '%s'", cluster_name, ds.name, esx_host,
1659 instance=instance)
1660 try:
1661 vm_util.relocate_vm(self._session, vm_ref, res_pool_ref,
1662 ds.ref, esx_host, devices=devices)
1663 LOG.info("Migrated instance to host %s", dest, instance=instance)
1664 except Exception:
1665 with excutils.save_and_reraise_exception():
1666 recover_method(context, instance, dest, migrate_data)
1667 post_method(context, instance, dest, block_migration, migrate_data)
1669 def poll_rebooting_instances(self, timeout, instances):
1670 """Poll for rebooting instances."""
1671 ctxt = nova_context.get_admin_context()
1673 instances_info = dict(instance_count=len(instances),
1674 timeout=timeout)
1676 if instances_info["instance_count"] > 0:
1677 LOG.info("Found %(instance_count)d hung reboots "
1678 "older than %(timeout)d seconds", instances_info)
1680 for instance in instances:
1681 LOG.info("Automatically hard rebooting", instance=instance)
1682 self.compute_api.reboot(ctxt, instance, "HARD")
1684 def get_info(self, instance):
1685 """Return data about the VM instance."""
1686 vm_ref = vm_util.get_vm_ref(self._session, instance)
1688 lst_properties = ["runtime.powerState"]
1689 try:
1690 vm_props = self._session._call_method(vutil,
1691 "get_object_properties_dict",
1692 vm_ref,
1693 lst_properties)
1694 except vexc.ManagedObjectNotFoundException:
1695 raise exception.InstanceNotFound(instance_id=instance.uuid)
1696 return hardware.InstanceInfo(
1697 state=constants.POWER_STATES[vm_props['runtime.powerState']])
1699 def _get_diagnostics(self, instance):
1700 """Return data about VM diagnostics."""
1701 vm_ref = vm_util.get_vm_ref(self._session, instance)
1702 lst_properties = ["summary.config",
1703 "summary.quickStats",
1704 "summary.runtime"]
1705 vm_props = self._session._call_method(vutil,
1706 "get_object_properties_dict",
1707 vm_ref,
1708 lst_properties)
1709 data = {}
1710 # All of the values received are objects. Convert them to dictionaries.
1711 for value in vm_props.values():
1712 prop_dict = vim_util.object_to_dict(value, list_depth=1)
1713 data.update(prop_dict)
1714 return data
1716 def get_diagnostics(self, instance):
1717 """Return data about VM diagnostics."""
1718 data = self._get_diagnostics(instance)
1719 # Add a namespace to all of the diagnostics
1720 return {'vmware:' + k: v for k, v in data.items()}
1722 def get_instance_diagnostics(self, instance):
1723 """Return data about VM diagnostics."""
1724 data = self._get_diagnostics(instance)
1725 state = data.get('powerState')
1726 if state:
1727 state = power_state.STATE_MAP[constants.POWER_STATES[state]]
1728 uptime = data.get('uptimeSeconds', 0)
1729 config_drive = configdrive.required_by(instance)
1730 diags = objects.Diagnostics(state=state,
1731 driver='vmwareapi',
1732 config_drive=config_drive,
1733 hypervisor_os='esxi',
1734 uptime=uptime)
1735 diags.memory_details = objects.MemoryDiagnostics(
1736 maximum=data.get('memorySizeMB', 0),
1737 used=data.get('guestMemoryUsage', 0))
1738 # TODO(garyk): add in cpu, nic and disk stats
1739 return diags
1741 def _get_vnc_console_connection(self, instance):
1742 """Return connection info for a vnc console."""
1743 vm_ref = vm_util.get_vm_ref(self._session, instance)
1744 opt_value = self._session._call_method(vutil,
1745 'get_object_property',
1746 vm_ref,
1747 vm_util.VNC_CONFIG_KEY)
1748 if opt_value:
1749 port = int(opt_value.value)
1750 else:
1751 raise exception.ConsoleTypeUnavailable(console_type='vnc')
1753 return {'port': port,
1754 'internal_access_path': None}
1756 @staticmethod
1757 def _get_machine_id_str(network_info):
1758 machine_id_str = ''
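# Each interface contributes one '#'-terminated record of
# semicolon-separated fields:
#     mac;ip;netmask;gateway;broadcast;dns
# e.g. (illustrative values only):
#     00:50:56:01:02:03;192.168.0.10;255.255.255.0;192.168.0.1;192.168.0.255;8.8.8.8#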
1759 for vif in network_info:
1760 # TODO(vish): add support for dns2
1761 # TODO(sateesh): add support for injection of ipv6 configuration
1762 network = vif['network']
1763 ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None
1764 subnets_v4 = [s for s in network['subnets'] if s['version'] == 4]
1765 if len(subnets_v4) > 0:
1766 if len(subnets_v4[0]['ips']) > 0:
1767 ip_v4 = subnets_v4[0]['ips'][0]
1768 if len(subnets_v4[0]['dns']) > 0:
1769 dns = subnets_v4[0]['dns'][0]['address']
1771 netmask_v4 = str(subnets_v4[0].as_netaddr().netmask)
1772 gateway_v4 = subnets_v4[0]['gateway']['address']
1773 broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast)
1775 interface_str = ";".join([vif['address'],
1776 ip_v4 and ip_v4['address'] or '',
1777 netmask_v4 or '',
1778 gateway_v4 or '',
1779 broadcast_v4 or '',
1780 dns or ''])
1781 machine_id_str = machine_id_str + interface_str + '#'
1782 return machine_id_str
1784 def _set_machine_id(self, client_factory, instance, network_info,
1785 vm_ref=None):
1786 """Set the machine id of the VM for guest tools to pick up
1787 and reconfigure the network interfaces.
1788 """
1789 if vm_ref is None:
1790 vm_ref = vm_util.get_vm_ref(self._session, instance)
1792 machine_id_change_spec = vm_util.get_machine_id_change_spec(
1793 client_factory,
1794 self._get_machine_id_str(network_info))
1796 LOG.debug("Reconfiguring VM instance to set the machine id",
1797 instance=instance)
1798 vm_util.reconfigure_vm(self._session, vm_ref, machine_id_change_spec)
1799 LOG.debug("Reconfigured VM instance to set the machine id",
1800 instance=instance)
1802 @utils.synchronized('vmware.get_and_set_vnc_port')
1803 def _get_and_set_vnc_config(self, client_factory, instance, vm_ref):
1804 """Set the vnc configuration of the VM."""
1805 port = vm_util.get_vnc_port(self._session)
1806 vnc_config_spec = vm_util.get_vnc_config_spec(
1807 client_factory, port)
1809 LOG.debug("Reconfiguring VM instance to enable vnc on "
1810 "port - %(port)s", {'port': port},
1811 instance=instance)
1812 vm_util.reconfigure_vm(self._session, vm_ref, vnc_config_spec)
1813 LOG.debug("Reconfigured VM instance to enable vnc on "
1814 "port - %(port)s", {'port': port},
1815 instance=instance)
1817 def _get_ds_browser(self, ds_ref):
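# Datastore browser morefs are cached per datastore to avoid a
# property lookup on every call.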
1818 ds_ref_value = vutil.get_moref_value(ds_ref)
1819 ds_browser = self._datastore_browser_mapping.get(ds_ref_value)
1820 if not ds_browser:
1821 ds_browser = self._session._call_method(vutil,
1822 "get_object_property",
1823 ds_ref,
1824 "browser")
1825 self._datastore_browser_mapping[ds_ref_value] = ds_browser
1826 return ds_browser
1828 def _create_folder_if_missing(self, ds_name, ds_ref, folder):
1829 """Create a folder if it does not exist.
1831 Currently there are two folders that are required on the datastore
1832 - base folder - the folder to store cached images
1833 - temp folder - the folder used for snapshot management and
1834 image uploading
1835 This method manages those folders, ensuring that they are
1836 created if they are missing.
1837 The ds_util method mkdir is used to create the folder. If this
1838 throws the exception 'FileAlreadyExistsException' then the
1839 folder already exists on the datastore.
1840 """
1841 path = ds_obj.DatastorePath(ds_name, folder)
1842 dc_info = self.get_datacenter_ref_and_name(ds_ref)
1843 try:
1844 ds_util.mkdir(self._session, path, dc_info.ref)
1845 LOG.debug("Folder %s created.", path)
1846 except vexc.FileAlreadyExistsException:
1847 # NOTE(hartsocks): if the folder already exists, that
1848 # just means the folder was prepped by another process.
1849 pass
1851 def check_cache_folder(self, ds_name, ds_ref):
1852 """Check that the cache folder exists."""
1853 self._create_folder_if_missing(ds_name, ds_ref, self._base_folder)
1855 def inject_network_info(self, instance, network_info):
1856 """inject network info for specified instance."""
1857 # Set the machine.id parameter of the instance to inject
1858 # the NIC configuration inside the VM
1859 client_factory = self._session.vim.client.factory
1860 self._set_machine_id(client_factory, instance, network_info)
1862 def manage_image_cache(self, context, instances):
1863 if not CONF.image_cache.remove_unused_base_images:
1864 LOG.debug("Image aging disabled. Aging will not be done.")
1865 return
1867 datastores = ds_util.get_available_datastores(self._session,
1868 self._cluster,
1869 self._datastore_regex)
1870 datastores_info = []
1871 for ds in datastores:
1872 dc_info = self.get_datacenter_ref_and_name(ds.ref)
1873 datastores_info.append((ds, dc_info))
1874 self._imagecache.update(context, instances, datastores_info)
1876 def _get_valid_vms_from_retrieve_result(self, retrieve_result):
1877 """Returns list of valid vms from RetrieveResult object."""
1878 lst_vm_names = []
1879 with vutil.WithRetrieval(self._session.vim, retrieve_result) as \
1880 objects:
1881 for vm in objects:
1882 vm_uuid = None
1883 conn_state = None
1884 for prop in vm.propSet:
1885 if prop.name == "runtime.connectionState":
1886 conn_state = prop.val
1887 elif prop.name == 'config.extraConfig["nvp.vm-uuid"]':
1888 vm_uuid = prop.val.value
1889 # Ignore VMs that do not have nvp.vm-uuid defined
1890 if not vm_uuid:
1891 continue
1892 # Ignore orphaned or inaccessible VMs
1893 if conn_state not in ["orphaned", "inaccessible"]:
1894 lst_vm_names.append(vm_uuid)
1896 return lst_vm_names
1898 def instance_exists(self, instance):
1899 try:
1900 vm_util.get_vm_ref(self._session, instance)
1901 return True
1902 except exception.InstanceNotFound:
1903 return False
1905 def attach_interface(self, context, instance, image_meta, vif):
1906 """Attach an interface to the instance."""
1907 vif_model = image_meta.properties.get('hw_vif_model',
1908 constants.DEFAULT_VIF_MODEL)
1909 vif_model = vm_util.convert_vif_model(vif_model)
1910 vif_info = vmwarevif.get_vif_dict(self._session, self._cluster,
1911 vif_model, vif)
1912 vm_ref = vm_util.get_vm_ref(self._session, instance)
1913 # Ensure that there is not a race with the port index management
1914 with lockutils.lock(instance.uuid,
1915 lock_file_prefix='nova-vmware-hot-plug'):
1916 port_index = vm_util.get_attach_port_index(self._session, vm_ref)
1917 client_factory = self._session.vim.client.factory
1918 extra_specs = self._get_extra_specs(instance.flavor)
1920 attach_config_spec = vm_util.get_network_attach_config_spec(
1921 client_factory, vif_info, port_index,
1922 extra_specs.vif_limits)
1923 LOG.debug("Reconfiguring VM to attach interface",
1924 instance=instance)
1925 try:
1926 vm_util.reconfigure_vm(self._session, vm_ref,
1927 attach_config_spec)
1928 except Exception as e:
1929 LOG.error('Attaching network adapter failed. Exception: %s',
1930 e, instance=instance)
1931 raise exception.InterfaceAttachFailed(
1932 instance_uuid=instance.uuid)
1934 self._network_api.update_instance_vnic_index(
1935 context, instance, vif, port_index)
1937 LOG.debug("Reconfigured VM to attach interface", instance=instance)
1939 def detach_interface(self, context, instance, vif):
1940 """Detach an interface from the instance."""
1941 vm_ref = vm_util.get_vm_ref(self._session, instance)
1942 # Ensure that there is not a race with the port index management
1943 with lockutils.lock(instance.uuid,
1944 lock_file_prefix='nova-vmware-hot-plug'):
1945 port_index = vm_util.get_vm_detach_port_index(self._session,
1946 vm_ref,
1947 vif['id'])
1948 if port_index is None:
1949 msg = _("No device with interface-id %s exists on "
1950 "VM") % vif['id']
1951 raise exception.NotFound(msg)
1953 hardware_devices = vm_util.get_hardware_devices(self._session,
1954 vm_ref)
1955 device = vmwarevif.get_network_device(hardware_devices,
1956 vif['address'])
1957 if device is None:
1958 msg = _("No device with MAC address %s exists on the "
1959 "VM") % vif['address']
1960 raise exception.NotFound(msg)
1962 self._network_api.update_instance_vnic_index(
1963 context, instance, vif, None)
1965 client_factory = self._session.vim.client.factory
1966 detach_config_spec = vm_util.get_network_detach_config_spec(
1967 client_factory, device, port_index)
1968 LOG.debug("Reconfiguring VM to detach interface",
1969 instance=instance)
1970 try:
1971 vm_util.reconfigure_vm(self._session, vm_ref,
1972 detach_config_spec)
1973 except Exception as e:
1974 LOG.error('Detaching network adapter failed. Exception: %s',
1975 e, instance=instance)
1976 raise exception.InterfaceDetachFailed(
1977 instance_uuid=instance.uuid)
1978 LOG.debug("Reconfigured VM to detach interface", instance=instance)
1980 def _use_disk_image_as_full_clone(self, vm_ref, vi):
1981 """Uses cached image disk by copying it into the VM directory."""
1983 instance_folder = vi.instance.uuid
1984 root_disk_name = "%s.vmdk" % vi.instance.uuid
1985 root_disk_ds_loc = vi.datastore.build_path(instance_folder,
1986 root_disk_name)
1988 vm_util.copy_virtual_disk(
1989 self._session,
1990 vi.dc_info.ref,
1991 str(vi.cache_image_path),
1992 str(root_disk_ds_loc))
1994 self._extend_if_required(
1995 vi.dc_info, vi.ii, vi.instance, str(root_disk_ds_loc))
1997 self._volumeops.attach_disk_to_vm(
1998 vm_ref, vi.instance,
1999 vi.ii.adapter_type, vi.ii.disk_type,
2000 str(root_disk_ds_loc),
2001 vi.root_gb * units.Mi, False,
2002 disk_io_limits=vi._extra_specs.disk_io_limits)
2004 def _sized_image_exists(self, sized_disk_ds_loc, ds_ref):
2005 ds_browser = self._get_ds_browser(ds_ref)
2006 return ds_util.file_exists(
2007 self._session, ds_browser, sized_disk_ds_loc.parent,
2008 sized_disk_ds_loc.basename)
2010 def _use_disk_image_as_linked_clone(self, vm_ref, vi):
2011 """Uses cached image as parent of a COW child in the VM directory."""
2013 sized_image_disk_name = "%s.vmdk" % vi.ii.image_id
2014 if vi.root_gb > 0:
2015 sized_image_disk_name = "%s.%s.vmdk" % (vi.ii.image_id, vi.root_gb)
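# e.g. '<image-id>.2.vmdk' for a 2GB root disk (illustrative name);
# each distinct root size gets its own resized copy in the image
# cache folder.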
2016 sized_disk_ds_loc = vi.cache_image_folder.join(sized_image_disk_name)
2018 # Ensure only a single thread extends the image at once.
2019 # We do this by taking a lock on the name of the extended
2020 # image. This allows multiple threads to create resized
2021 # copies simultaneously, as long as they are different
2022 # sizes. Threads attempting to create the same resized copy
2023 # will be serialized, with only the first actually creating
2024 # the copy.
2025 #
2026 # Note that the object is in a per-nova cache directory,
2027 # so inter-nova locking is not a concern. Consequently we
2028 # can safely use simple thread locks.
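# The lock key is the full datastore path of the sized copy, e.g.
# '[datastore1] <cache-folder>/<image-id>/<image-id>.2.vmdk'
# (illustrative path).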
2030 with lockutils.lock(str(sized_disk_ds_loc),
2031 lock_file_prefix='nova-vmware-image'):
2033 if not self._sized_image_exists(sized_disk_ds_loc,
2034 vi.datastore.ref):
2035 LOG.debug("Copying root disk of size %sGb", vi.root_gb,
2036 instance=vi.instance)
2037 try:
2038 vm_util.copy_virtual_disk(
2039 self._session,
2040 vi.dc_info.ref,
2041 str(vi.cache_image_path),
2042 str(sized_disk_ds_loc))
2043 except Exception as e:
2044 LOG.warning("Root disk file creation failed - %s",
2045 e, instance=vi.instance)
2046 with excutils.save_and_reraise_exception():
2047 LOG.error('Failed to copy cached image %(source)s to '
2048 '%(dest)s for resize: %(error)s',
2049 {'source': vi.cache_image_path,
2050 'dest': sized_disk_ds_loc,
2051 'error': e},
2052 instance=vi.instance)
2053 try:
2054 ds_util.file_delete(self._session,
2055 sized_disk_ds_loc,
2056 vi.dc_info.ref)
2057 except vexc.FileNotFoundException:
2058 # File was never created: cleanup not
2059 # required
2060 pass
2062 # Resize the copy to the appropriate size. No need
2063 # for cleanup here, as _extend_virtual_disk
2064 # already does it.
2065 self._extend_if_required(
2066 vi.dc_info, vi.ii, vi.instance, str(sized_disk_ds_loc))
2068 # Associate the sized image disk to the VM by attaching to the VM a
2069 # COW child of said disk.
2070 self._volumeops.attach_disk_to_vm(
2071 vm_ref, vi.instance,
2072 vi.ii.adapter_type, vi.ii.disk_type,
2073 str(sized_disk_ds_loc),
2074 vi.root_gb * units.Mi, vi.ii.linked_clone,
2075 disk_io_limits=vi._extra_specs.disk_io_limits)
2077 def _use_iso_image(self, vm_ref, vi):
2078 """Uses cached image as a bootable virtual cdrom."""
2080 self._attach_cdrom_to_vm(
2081 vm_ref, vi.instance, vi.datastore.ref,
2082 str(vi.cache_image_path))
2084 # Optionally create and attach blank disk
2085 if vi.root_gb > 0:
2086 instance_folder = vi.instance.uuid
2087 root_disk_name = "%s.vmdk" % vi.instance.uuid
2088 root_disk_ds_loc = vi.datastore.build_path(instance_folder,
2089 root_disk_name)
2091 # It is pointless to COW a blank disk
2092 linked_clone = False
2094 vm_util.create_virtual_disk(
2095 self._session, vi.dc_info.ref,
2096 vi.ii.adapter_type,
2097 vi.ii.disk_type,
2098 str(root_disk_ds_loc),
2099 vi.root_gb * units.Mi)
2101 self._volumeops.attach_disk_to_vm(
2102 vm_ref, vi.instance,
2103 vi.ii.adapter_type, vi.ii.disk_type,
2104 str(root_disk_ds_loc),
2105 vi.root_gb * units.Mi, linked_clone,
2106 disk_io_limits=vi._extra_specs.disk_io_limits)
2108 def get_datacenter_ref_and_name(self, ds_ref):
2109 """Get the datacenter name and the reference."""
2110 return ds_util.get_dc_info(self._session, ds_ref)
2112 def list_instances(self):
2113 """Lists the VM instances that are registered with vCenter cluster."""
2114 properties = ['runtime.connectionState',
2115 'config.extraConfig["nvp.vm-uuid"]']
2116 LOG.debug("Getting list of instances from cluster %s",
2117 self._cluster)
2118 vms = []
2119 if self._root_resource_pool:
2120 vms = self._session._call_method(
2121 vim_util, 'get_inner_objects', self._root_resource_pool, 'vm',
2122 'VirtualMachine', properties)
2123 lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
2125 LOG.debug("Got total of %s instances", str(len(lst_vm_names)))
2126 return lst_vm_names
2128 def get_vnc_console(self, instance):
2129 """Return connection info for a vnc console using vCenter logic."""
2131 # vCenter does not run virtual machines and does not run
2132 # a VNC proxy. Instead, you need to tell OpenStack to talk
2133 # directly to the ESX host running the VM you are attempting
2134 # to connect to via VNC.
2136 vnc_console = self._get_vnc_console_connection(instance)
2137 host_name = vm_util.get_host_name_for_vm(
2138 self._session,
2139 instance)
2140 vnc_console['host'] = host_name
2142 # NOTE: a VM can move between hosts in some situations. Logged at debug for admins.
2143 LOG.debug("VM %(uuid)s is currently on host %(host_name)s",
2144 {'uuid': instance.uuid, 'host_name': host_name},
2145 instance=instance)
2146 return ctype.ConsoleVNC(**vnc_console)
2148 def get_mks_console(self, instance):
2149 vm_ref = vm_util.get_vm_ref(self._session, instance)
2150 ticket = self._session._call_method(self._session.vim,
2151 'AcquireTicket',
2152 vm_ref,
2153 ticketType='mks')
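# Normalize the host certificate thumbprint for the console client,
# e.g. 'AB:CD:EF:...' -> 'abcdef...' (illustrative digest).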
2154 thumbprint = ticket.sslThumbprint.replace(':', '').lower()
2155 mks_auth = {'ticket': ticket.ticket,
2156 'cfgFile': ticket.cfgFile,
2157 'thumbprint': thumbprint}
2158 internal_access_path = jsonutils.dumps(mks_auth)
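# The auth details are JSON-serialized into internal_access_path for
# the MKS console proxy to consume.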
2159 return ctype.ConsoleMKS(ticket.host, ticket.port, internal_access_path)